Reverted changes that had been committed under the wrong author, and re-committed them under the correct author
commit c097223390

 .mailmap | 48
@@ -1,35 +1,43 @@
 # Format is:
-# <preferred e-mail> <other e-mail>
-<code@term.ie> <github@anarkystic.com>
-<code@term.ie> <termie@preciousroy.local>
-<Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
-<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
-<matt.dietz@rackspace.com> <mdietz@openstack>
-<cbehrens@codestud.com> <chris.behrens@rackspace.com>
-<devin.carlen@gmail.com> <devcamcar@illian.local>
-<ewan.mellor@citrix.com> <emellor@silver>
-<jaypipes@gmail.com> <jpipes@serialcoder>
+# <preferred e-mail> <other e-mail 1>
+# <preferred e-mail> <other e-mail 2>
 <anotherjesse@gmail.com> <jesse@dancelamb>
 <anotherjesse@gmail.com> <jesse@gigantor.local>
 <anotherjesse@gmail.com> <jesse@ubuntu>
-<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
+<ant@openstack.org> <amesserl@rackspace.com>
+<Armando.Migliaccio@eu.citrix.com> <armando.migliaccio@citrix.com>
+<brian.lamar@rackspace.com> <brian.lamar@gmail.com>
+<bschott@isi.edu> <bfschott@gmail.com>
+<cbehrens@codestud.com> <chris.behrens@rackspace.com>
+<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
+<code@term.ie> <github@anarkystic.com>
+<code@term.ie> <termie@preciousroy.local>
+<corywright@gmail.com> <cory.wright@rackspace.com>
+<devin.carlen@gmail.com> <devcamcar@illian.local>
+<ewan.mellor@citrix.com> <emellor@silver>
+<jaypipes@gmail.com> <jpipes@serialcoder>
 <jmckenty@gmail.com> <jmckenty@joshua-mckentys-macbook-pro.local>
+<jmckenty@gmail.com> <jmckenty@yyj-dhcp171.corp.flock.com>
 <jmckenty@gmail.com> <joshua.mckenty@nasa.gov>
 <justin@fathomdb.com> <justinsb@justinsb-desktop>
-<masumotok@nttdata.co.jp> <root@openstack2-api>
+<justin@fathomdb.com> <superstack@superstack.org>
 <masumotok@nttdata.co.jp> Masumoto<masumotok@nttdata.co.jp>
+<masumotok@nttdata.co.jp> <root@openstack2-api>
+<matt.dietz@rackspace.com> <matthewdietz@Matthew-Dietzs-MacBook-Pro.local>
+<matt.dietz@rackspace.com> <mdietz@openstack>
 <mordred@inaugust.com> <mordred@hudson>
-<paul@openstack.org> <pvoccio@castor.local>
 <paul@openstack.org> <paul.voccio@rackspace.com>
+<paul@openstack.org> <pvoccio@castor.local>
+<rconradharris@gmail.com> <rick.harris@rackspace.com>
+<rlane@wikimedia.org> <laner@controller>
+<sleepsonthefloor@gmail.com> <root@tonbuntu>
 <soren.hansen@rackspace.com> <soren@linux2go.dk>
 <todd@ansolabs.com> <todd@lapex>
 <todd@ansolabs.com> <todd@rubidine.com>
-<vishvananda@gmail.com> <vishvananda@yahoo.com>
+<tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
+<ueno.nachi@lab.ntt.co.jp> <nati.ueno@gmail.com>
+<ueno.nachi@lab.ntt.co.jp> <nova@u4>
+<ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp>
 <vishvananda@gmail.com> <root@mirror.nasanebula.net>
 <vishvananda@gmail.com> <root@ubuntu>
-<sleepsonthefloor@gmail.com> <root@tonbuntu>
-<rlane@wikimedia.org> <laner@controller>
-<rconradharris@gmail.com> <rick.harris@rackspace.com>
-<corywright@gmail.com> <cory.wright@rackspace.com>
-<ant@openstack.org> <amesserl@rackspace.com>
-<chiradeep@cloud.com> <chiradeep@chiradeep-lt2>
+<vishvananda@gmail.com> <vishvananda@yahoo.com>
 Authors | 14
@@ -3,12 +3,17 @@ Anne Gentle <anne@openstack.org>
 Anthony Young <sleepsonthefloor@gmail.com>
 Antony Messerli <ant@openstack.org>
 Armando Migliaccio <Armando.Migliaccio@eu.citrix.com>
+Bilal Akhtar <bilalakhtar@ubuntu.com>
+Brian Lamar <brian.lamar@rackspace.com>
+Brian Schott <bschott@isi.edu>
+Brian Waldon <brian.waldon@rackspace.com>
 Chiradeep Vittal <chiradeep@cloud.com>
 Chmouel Boudjnah <chmouel@chmouel.com>
 Chris Behrens <cbehrens@codestud.com>
+Christian Berendt <berendt@b1-systems.de>
 Cory Wright <corywright@gmail.com>
-David Pravec <David.Pravec@danix.org>
 Dan Prince <dan.prince@rackspace.com>
+David Pravec <David.Pravec@danix.org>
 Dean Troyer <dtroyer@gmail.com>
 Devin Carlen <devin.carlen@gmail.com>
 Ed Leafe <ed@leafe.com>
@@ -39,8 +44,10 @@ Monsyne Dragon <mdragon@rackspace.com>
 Monty Taylor <mordred@inaugust.com>
 MORITA Kazutaka <morita.kazutaka@gmail.com>
 Muneyuki Noguchi <noguchimn@nttdata.co.jp>
-Nachi Ueno <ueno.nachi@lab.ntt.co.jp> <openstack@lab.ntt.co.jp> <nati.ueno@gmail.com> <nova@u4>
+Nachi Ueno <ueno.nachi@lab.ntt.co.jp>
+Naveed Massjouni <naveed.massjouni@rackspace.com>
 Paul Voccio <paul@openstack.org>
+Ricardo Carrillo Cruz <emaildericky@gmail.com>
 Rick Clark <rick@openstack.org>
 Rick Harris <rconradharris@gmail.com>
 Rob Kost <kost@isi.edu>
@@ -52,7 +59,8 @@ Soren Hansen <soren.hansen@rackspace.com>
 Thierry Carrez <thierry@openstack.org>
 Todd Willey <todd@ansolabs.com>
 Trey Morris <trey.morris@rackspace.com>
-Tushar Patil <tushar.vitthal.patil@gmail.com> <tpatil@vertex.co.in>
+Tushar Patil <tushar.vitthal.patil@gmail.com>
+Vasiliy Shlykov <vash@vasiliyshlykov.org>
 Vishvananda Ishaya <vishvananda@gmail.com>
 Youcef Laribi <Youcef.Laribi@eu.citrix.com>
 Zhixue Wu <Zhixue.Wu@citrix.com>
 HACKING | 19
@@ -47,3 +47,22 @@ Human Alphabetical Order Examples
   from nova.auth import users
   from nova.endpoint import api
   from nova.endpoint import cloud
+
+
+Docstrings
+----------
+  """Summary of the function, class or method, less than 80 characters.
+
+  New paragraph after newline that explains in more detail any general
+  information about the function, class or method. After this, if defining
+  parameters and return types use the Sphinx format. After that an extra
+  newline then close the quotations.
+
+  When writing the docstring for a class, an extra line should be placed
+  after the closing quotations. For more in-depth explanations for these
+  decisions see http://www.python.org/dev/peps/pep-0257/
+
+  :param foo: the foo parameter
+  :param bar: the bar parameter
+  :returns: description of the return value
+
+  """
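Applied to a real function, the docstring convention added above reads like the following minimal sketch (the function, its parameters, and its behavior are invented purely for illustration):

    def fixed_ip_count(context, host):
        """Count the fixed ips currently allocated to a single host.

        Queries the database through the given context and counts the
        fixed ip rows whose instance lives on the named host. Intended
        for reporting only; it takes no locks.

        :param context: the request context
        :param host: name of the compute host to inspect
        :returns: number of fixed ips allocated to instances on the host

        """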
 MANIFEST.in | 10
@@ -6,14 +6,23 @@ graft doc
 graft smoketests
 graft tools
 graft etc
+graft bzrplugins
+graft contrib
+graft po
+graft plugins
 include nova/api/openstack/notes.txt
+include nova/auth/*.schema
 include nova/auth/novarc.template
+include nova/auth/opendj.sh
 include nova/auth/slap.sh
 include nova/cloudpipe/bootscript.sh
 include nova/cloudpipe/client.ovpn.template
+include nova/cloudpipe/bootscript.template
 include nova/compute/fakevirtinstance.xml
 include nova/compute/interfaces.template
+include nova/console/xvp.conf.template
 include nova/db/sqlalchemy/migrate_repo/migrate.cfg
+include nova/db/sqlalchemy/migrate_repo/README
 include nova/virt/interfaces.template
 include nova/virt/libvirt*.xml.template
 include nova/tests/CA/
@@ -25,6 +34,7 @@ include nova/tests/bundle/1mb.manifest.xml
 include nova/tests/bundle/1mb.no_kernel_or_ramdisk.manifest.xml
 include nova/tests/bundle/1mb.part.0
 include nova/tests/bundle/1mb.part.1
+include nova/tests/db/nova.austin.sqlite
 include plugins/xenapi/README
 include plugins/xenapi/etc/xapi.d/plugins/objectstore
 include plugins/xenapi/etc/xapi.d/plugins/pluginlib_nova.py
@@ -433,6 +433,37 @@ class ProjectCommands(object):
                   "nova-api server on this host.")

+
+class FixedIpCommands(object):
+    """Class for managing fixed ip."""
+
+    def list(self, host=None):
+        """Lists all fixed ips (optionally by host) arguments: [host]"""
+        ctxt = context.get_admin_context()
+        if host == None:
+            fixed_ips = db.fixed_ip_get_all(ctxt)
+        else:
+            fixed_ips = db.fixed_ip_get_all_by_host(ctxt, host)
+
+        print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (_('network'),
+                                                  _('IP address'),
+                                                  _('MAC address'),
+                                                  _('hostname'),
+                                                  _('host'))
+        for fixed_ip in fixed_ips:
+            hostname = None
+            host = None
+            mac_address = None
+            if fixed_ip['instance']:
+                instance = fixed_ip['instance']
+                hostname = instance['hostname']
+                host = instance['host']
+                mac_address = instance['mac_address']
+            print "%-18s\t%-15s\t%-17s\t%-15s\t%s" % (
+                    fixed_ip['network']['cidr'],
+                    fixed_ip['address'],
+                    mac_address, hostname, host)
+
+
 class FloatingIpCommands(object):
     """Class for managing floating ip."""

@@ -472,8 +503,8 @@ class NetworkCommands(object):
     """Class for managing networks."""

     def create(self, fixed_range=None, num_networks=None,
-               network_size=None, vlan_start=None, vpn_start=None,
-               fixed_range_v6=None):
+               network_size=None, vlan_start=None,
+               vpn_start=None, fixed_range_v6=None, label='public'):
         """Creates fixed ips for host by range
         arguments: [fixed_range=FLAG], [num_networks=FLAG],
         [network_size=FLAG], [vlan_start=FLAG],
@@ -495,9 +526,22 @@ class NetworkCommands(object):
                           cidr=fixed_range,
                           num_networks=int(num_networks),
                           network_size=int(network_size),
-                          cidr_v6=fixed_range_v6,
                           vlan_start=int(vlan_start),
-                          vpn_start=int(vpn_start))
+                          vpn_start=int(vpn_start),
+                          cidr_v6=fixed_range_v6,
+                          label=label)
+
+    def list(self):
+        """List all created networks"""
+        print "%-18s\t%-15s\t%-15s\t%-15s" % (_('network'),
+                                              _('netmask'),
+                                              _('start address'),
+                                              'DNS')
+        for network in db.network_get_all(context.get_admin_context()):
+            print "%-18s\t%-15s\t%-15s\t%-15s" % (network.cidr,
+                                                  network.netmask,
+                                                  network.dhcp_start,
+                                                  network.dns)


 class ServiceCommands(object):
@@ -579,6 +623,13 @@ class VolumeCommands(object):
         ctxt = context.get_admin_context()
         volume = db.volume_get(ctxt, param2id(volume_id))
         host = volume['host']
+
+        if not host:
+            print "Volume not yet assigned to host."
+            print "Deleting volume from database and skipping rpc."
+            db.volume_destroy(ctxt, param2id(volume_id))
+            return
+
         if volume['status'] == 'in-use':
             print "Volume is in-use."
             print "Detach volume from instance and then try again."
@@ -615,6 +666,7 @@ CATEGORIES = [
     ('role', RoleCommands),
     ('shell', ShellCommands),
    ('vpn', VpnCommands),
+    ('fixed', FixedIpCommands),
     ('floating', FloatingIpCommands),
     ('network', NetworkCommands),
     ('service', ServiceCommands),
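The CATEGORIES entry is what exposes FixedIpCommands on the command line: the management script treats its first argument as a category, instantiates the matching *Commands class, and calls the method named by the second argument, so the new row enables invocations such as nova-manage fixed list. A minimal, self-contained sketch of that dispatch pattern (an assumption about the loader's shape, not the actual nova-manage code, whose argument handling is richer):

    import sys

    class FixedIpCommands(object):
        """Hypothetical stand-in for the real command class."""
        def list(self, host=None):
            print "fixed ips on host: %s" % host

    CATEGORIES = [('fixed', FixedIpCommands)]

    def dispatch(argv):
        """argv is [category, action, extra args...]."""
        category, action = argv[0], argv[1]
        for name, cls in CATEGORIES:
            if name == category:
                return getattr(cls(), action)(*argv[2:])
        raise SystemExit("unknown category: %s" % category)

    if __name__ == '__main__':
        dispatch(sys.argv[1:])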
@@ -37,20 +37,22 @@ From a server you intend to use as a cloud controller node, use this command to

 ::

-   wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/Nova_CC_Installer_v0.1
+   wget --no-check-certificate https://github.com/dubsquared/OpenStack-NOVA-Installer-Script/raw/master/nova-CC-install-v1.1.sh

 Ensure you can execute the script by modifying the permissions on the script file.

 ::

-   sudo chmod 755 Nova_CC_Installer_v0.1
+   sudo chmod 755 nova-CC-install-v1.1.sh


 ::

-   sudo ./Nova_CC_Installer_v0.1
+   sudo ./nova-CC-install-v1.1.sh

-Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. Copy the nova.conf from the cloud controller node to the compute node.
+Next, from a server you intend to use as a compute node (doesn't contain the database), install the nova services. You can use the nova-NODE-installer.sh script from the above github-hosted project for the compute node installation.
+
+Copy the nova.conf from the cloud controller node to the compute node.

 Restart related services::

@@ -247,7 +249,7 @@ Here is an example of what this looks like with real data::

 Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.

-On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. This is ONLY necessary if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device.
+On running the "nova-manage network create" command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. You only need to mark the network as a bridge if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device.


 Step 2 - Create Nova certifications
@@ -288,9 +290,35 @@ Another common issue is you cannot ping or SSH your instances after issuing the

    killall dnsmasq
    service nova-network restart

+To avoid issues with KVM and permissions with Nova, run the following commands to ensure we have VM's that are running optimally::
+
+   chgrp kvm /dev/kvm
+   chmod g+rwx /dev/kvm
+
+If you want to use the 10.04 Ubuntu Enterprise Cloud images that are readily available at http://uec-images.ubuntu.com/releases/10.04/release/, you may run into delays with booting. Any server that does not have nova-api running on it needs this iptables entry so that UEC images can get metadata info. On compute nodes, configure the iptables with this next step::
+
+   # iptables -t nat -A PREROUTING -d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT --to-destination $NOVA_API_IP:8773
+
 Testing the Installation
 ````````````````````````
+
+You can confirm that your compute node is talking to your cloud controller. From the cloud controller, run this database query::
+
+   mysql -u$MYSQL_USER -p$MYSQL_PASS nova -e 'select * from services;'
+
+In return, you should see something similar to this::
+
+   +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
+   | created_at          | updated_at          | deleted_at | deleted | id | host     | binary         | topic     | report_count | disabled | availability_zone |
+   +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
+   | 2011-01-28 22:52:46 | 2011-02-03 06:55:48 | NULL       | 0       |  1 | osdemo02 | nova-network   | network   |        46064 |        0 | nova              |
+   | 2011-01-28 22:52:48 | 2011-02-03 06:55:57 | NULL       | 0       |  2 | osdemo02 | nova-compute   | compute   |        46056 |        0 | nova              |
+   | 2011-01-28 22:52:52 | 2011-02-03 06:55:50 | NULL       | 0       |  3 | osdemo02 | nova-scheduler | scheduler |        46065 |        0 | nova              |
+   | 2011-01-29 23:49:29 | 2011-02-03 06:54:26 | NULL       | 0       |  4 | osdemo01 | nova-compute   | compute   |        37050 |        0 | nova              |
+   | 2011-01-30 23:42:24 | 2011-02-03 06:55:44 | NULL       | 0       |  9 | osdemo04 | nova-compute   | compute   |        28484 |        0 | nova              |
+   | 2011-01-30 21:27:28 | 2011-02-03 06:54:23 | NULL       | 0       |  8 | osdemo05 | nova-compute   | compute   |        29284 |        0 | nova              |
+   +---------------------+---------------------+------------+---------+----+----------+----------------+-----------+--------------+----------+-------------------+
+
+You can see that osdemo0{1,2,4,5} are all running 'nova-compute'. When you start spinning up instances, they will allocate on any node that is running nova-compute from this list.
+
 You can then use `euca2ools` to test some items::

    euca-describe-images
@@ -1826,7 +1826,7 @@ msgstr ""

 #: nova/virt/xenapi/vm_utils.py:290
 #, python-format
-msgid "PV Kernel in VDI:%d"
+msgid "PV Kernel in VDI:%s"
 msgstr ""

 #: nova/virt/xenapi/vm_utils.py:318
@@ -21,6 +21,7 @@ Nova User API client library.

 import base64
 import boto
+import boto.exception
 import httplib

 from boto.ec2.regioninfo import RegionInfo
@@ -288,10 +289,14 @@ class NovaAdminClient(object):

     def get_user(self, name):
         """Grab a single user by name."""
-        user = self.apiconn.get_object('DescribeUser', {'Name': name},
-                                       UserInfo)
-        if user.username != None:
-            return user
+        try:
+            return self.apiconn.get_object('DescribeUser',
+                                           {'Name': name},
+                                           UserInfo)
+        except boto.exception.BotoServerError, e:
+            if e.status == 400 and e.error_code == 'NotFound':
+                return None
+            raise

     def has_user(self, username):
         """Determine if user exists."""
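With this change, get_user treats a 400/NotFound reply as an ordinary "no such user" result instead of letting boto's exception escape, while any other server error still propagates. A caller-side sketch (the client construction is elided because its arguments are deployment-specific):

    # 'admin' is a NovaAdminClient already wired to a cloud controller.
    user = admin.get_user('alice')
    if user is None:
        print "no such user"        # NotFound is now a normal return value
    else:
        print "found %s" % user.username
    # Any other boto.exception.BotoServerError still raises here.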
@@ -376,6 +381,13 @@ class NovaAdminClient(object):
                   'MemberUsers': member_users}
         return self.apiconn.get_object('RegisterProject', params, ProjectInfo)

+    def modify_project(self, projectname, manager_user=None, description=None):
+        """Modifies an existing project."""
+        params = {'Name': projectname,
+                  'ManagerUser': manager_user,
+                  'Description': description}
+        return self.apiconn.get_status('ModifyProject', params)
+
     def delete_project(self, projectname):
         """Permanently deletes the specified project."""
         return self.apiconn.get_object('DeregisterProject',
@@ -21,7 +21,6 @@ Starting point for routing EC2 requests.
 """

 import datetime
-import routes
 import webob
 import webob.dec
 import webob.exc
@@ -171,7 +170,7 @@ class Authenticate(wsgi.Middleware):
                       req.path)
         # Be explicit for what exceptions are 403, the rest bubble as 500
         except (exception.NotFound, exception.NotAuthorized) as ex:
-            LOG.audit(_("Authentication Failure: %s"), ex.args[0])
+            LOG.audit(_("Authentication Failure: %s"), unicode(ex))
             raise webob.exc.HTTPForbidden()

         # Authenticated!
@@ -233,7 +232,7 @@ class Authorizer(wsgi.Middleware):
         super(Authorizer, self).__init__(application)
         self.action_roles = {
             'CloudController': {
-                'DescribeAvailabilityzones': ['all'],
+                'DescribeAvailabilityZones': ['all'],
                 'DescribeRegions': ['all'],
                 'DescribeSnapshots': ['all'],
                 'DescribeKeyPairs': ['all'],
@@ -316,30 +315,31 @@ class Executor(wsgi.Application):
         try:
             result = api_request.invoke(context)
         except exception.InstanceNotFound as ex:
-            LOG.info(_('InstanceNotFound raised: %s'), ex.args[0],
+            LOG.info(_('InstanceNotFound raised: %s'), unicode(ex),
                      context=context)
             ec2_id = cloud.id_to_ec2_id(ex.instance_id)
             message = _('Instance %s not found') % ec2_id
             return self._error(req, context, type(ex).__name__, message)
         except exception.VolumeNotFound as ex:
-            LOG.info(_('VolumeNotFound raised: %s'), ex.args[0],
+            LOG.info(_('VolumeNotFound raised: %s'), unicode(ex),
                      context=context)
             ec2_id = cloud.id_to_ec2_id(ex.volume_id, 'vol-%08x')
             message = _('Volume %s not found') % ec2_id
             return self._error(req, context, type(ex).__name__, message)
         except exception.NotFound as ex:
-            LOG.info(_('NotFound raised: %s'), ex.args[0], context=context)
-            return self._error(req, context, type(ex).__name__, ex.args[0])
+            LOG.info(_('NotFound raised: %s'), unicode(ex), context=context)
+            return self._error(req, context, type(ex).__name__, unicode(ex))
         except exception.ApiError as ex:
-            LOG.exception(_('ApiError raised: %s'), ex.args[0],
+            LOG.exception(_('ApiError raised: %s'), unicode(ex),
                           context=context)
             if ex.code:
-                return self._error(req, context, ex.code, ex.args[0])
+                return self._error(req, context, ex.code, unicode(ex))
             else:
-                return self._error(req, context, type(ex).__name__, ex.args[0])
+                return self._error(req, context, type(ex).__name__,
+                                   unicode(ex))
         except Exception as ex:
             extra = {'environment': req.environ}
-            LOG.exception(_('Unexpected error raised: %s'), ex.args[0],
+            LOG.exception(_('Unexpected error raised: %s'), unicode(ex),
                           extra=extra, context=context)
             return self._error(req,
                                context,
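The systematic move from ex.args[0] to unicode(ex) is defensive: an exception raised with no arguments has an empty args tuple, so ex.args[0] would itself blow up inside the error handler, while unicode(ex) always yields a printable (possibly empty) message. A small Python 2 illustration:

    try:
        raise ValueError()            # raised with no arguments at all
    except ValueError as ex:
        print repr(unicode(ex))       # u'' -- always safe to log
        # ex.args[0] would raise IndexError here: args is an empty tuple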
@@ -184,6 +184,17 @@ class AdminController(object):
                                        description=None,
                                        member_users=None))

+    def modify_project(self, context, name, manager_user, description=None,
+                       **kwargs):
+        """Modifies a project"""
+        msg = _("Modify project: %(name)s managed by"
+                " %(manager_user)s") % locals()
+        LOG.audit(msg, context=context)
+        manager.AuthManager().modify_project(name,
+                                             manager_user=manager_user,
+                                             description=description)
+        return True
+
     def deregister_project(self, context, name):
         """Permanently deletes a project."""
         LOG.audit(_("Delete project: %s"), name, context=context)
@@ -327,7 +327,9 @@ class CloudController(object):
         if not group_name is None:
             groups = [g for g in groups if g.name in group_name]

-        return {'securityGroupInfo': groups}
+        return {'securityGroupInfo':
+                list(sorted(groups,
+                            key=lambda k: (k['ownerId'], k['groupName'])))}

     def _format_security_group(self, context, group):
         g = {}
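Sorting on the (ownerId, groupName) pair makes DescribeSecurityGroups return a deterministic ordering instead of whatever order the database happens to yield. The same idiom in isolation, with made-up group dictionaries:

    groups = [{'ownerId': 'proj2', 'groupName': 'web'},
              {'ownerId': 'proj1', 'groupName': 'web'},
              {'ownerId': 'proj1', 'groupName': 'db'}]
    ordered = list(sorted(groups,
                          key=lambda k: (k['ownerId'], k['groupName'])))
    # -> proj1/db, proj1/web, proj2/web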
@@ -512,8 +514,11 @@ class CloudController(object):
     def get_console_output(self, context, instance_id, **kwargs):
         LOG.audit(_("Get console output for instance %s"), instance_id,
                   context=context)
-        # instance_id is passed in as a list of instances
-        ec2_id = instance_id[0]
+        # instance_id may be passed in as a list of instances
+        if type(instance_id) == list:
+            ec2_id = instance_id[0]
+        else:
+            ec2_id = instance_id
         instance_id = ec2_id_to_id(ec2_id)
         output = self.compute_api.get_console_output(
             context, instance_id=instance_id)
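The added branch makes get_console_output accept both calling conventions seen in the EC2 layer: a bare instance ID and a list of IDs, of which only the first is used. The normalization on its own:

    def first_id(instance_id):
        # Accept either 'i-00000001' or ['i-00000001', ...].
        if type(instance_id) == list:
            return instance_id[0]
        return instance_id

    assert first_id('i-00000001') == 'i-00000001'
    assert first_id(['i-00000001', 'i-00000002']) == 'i-00000001'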
@@ -836,11 +841,26 @@ class CloudController(object):
         self.compute_api.update(context, instance_id=instance_id, **kwargs)
         return True

+    def _format_image(self, context, image):
+        """Convert from format defined by BaseImageService to S3 format."""
+        i = {}
+        i['imageId'] = image.get('id')
+        i['kernelId'] = image.get('kernel_id')
+        i['ramdiskId'] = image.get('ramdisk_id')
+        i['imageOwnerId'] = image.get('owner_id')
+        i['imageLocation'] = image.get('location')
+        i['imageState'] = image.get('status')
+        i['type'] = image.get('type')
+        i['isPublic'] = image.get('is_public')
+        i['architecture'] = image.get('architecture')
+        return i
+
     def describe_images(self, context, image_id=None, **kwargs):
-        # Note: image_id is a list!
+        # NOTE: image_id is a list!
         images = self.image_service.index(context)
         if image_id:
-            images = filter(lambda x: x['imageId'] in image_id, images)
+            images = filter(lambda x: x['id'] in image_id, images)
+        images = [self._format_image(context, i) for i in images]
         return {'imagesSet': images}

     def deregister_image(self, context, image_id, **kwargs):
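_format_image is a one-way key translation from the internal BaseImageService dictionaries (snake_case) to the S3/EC2 response shape (camelCase); keys absent from the record simply come through as None via dict.get. Fed a hypothetical service record it would behave like this:

    image = {'id': 'ami-1234', 'kernel_id': 'aki-0001',
             'ramdisk_id': 'ari-0001', 'owner_id': 'proj1',
             'location': 'bucket/image.manifest.xml',
             'status': 'available', 'type': 'machine',
             'is_public': True, 'architecture': 'x86_64'}
    # _format_image(context, image) returns:
    # {'imageId': 'ami-1234', 'kernelId': 'aki-0001',
    #  'ramdiskId': 'ari-0001', 'imageOwnerId': 'proj1',
    #  'imageLocation': 'bucket/image.manifest.xml',
    #  'imageState': 'available', 'type': 'machine',
    #  'isPublic': True, 'architecture': 'x86_64'}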
@@ -863,6 +883,9 @@ class CloudController(object):
                                      % attribute)
         try:
             image = self.image_service.show(context, image_id)
+            image = self._format_image(context,
+                                       self.image_service.show(context,
+                                                               image_id))
         except IndexError:
             raise exception.ApiError(_('invalid id: %s') % image_id)
         result = {'image_id': image_id, 'launchPermission': []}
@@ -51,8 +51,8 @@ class FaultWrapper(wsgi.Middleware):
         try:
             return req.get_response(self.application)
         except Exception as ex:
-            LOG.exception(_("Caught error: %s"), str(ex))
-            exc = webob.exc.HTTPInternalServerError(explanation=str(ex))
+            LOG.exception(_("Caught error: %s"), unicode(ex))
+            exc = webob.exc.HTTPInternalServerError(explanation=unicode(ex))
             return faults.Fault(exc)


@@ -79,6 +79,7 @@ class APIRouter(wsgi.Router):
         server_members["actions"] = "GET"
         server_members['suspend'] = 'POST'
         server_members['resume'] = 'POST'
+        server_members['reset_network'] = 'POST'

         mapper.resource("server", "servers", controller=servers.Controller(),
                         collection={'detail': 'GET'},
@@ -18,22 +18,29 @@
 from nova import exception


-def limited(items, req):
-    """Return a slice of items according to requested offset and limit.
-
-    items - a sliceable
-    req - wobob.Request possibly containing offset and limit GET variables.
-    offset is where to start in the list, and limit is the maximum number
-    of items to return.
-
-    If limit is not specified, 0, or > 1000, defaults to 1000.
-    """
-
-    offset = int(req.GET.get('offset', 0))
-    limit = int(req.GET.get('limit', 0))
-    if not limit:
-        limit = 1000
-    limit = min(1000, limit)
+def limited(items, request, max_limit=1000):
+    """
+    Return a slice of items according to requested offset and limit.
+
+    @param items: A sliceable entity
+    @param request: `webob.Request` possibly containing 'offset' and 'limit'
+                    GET variables. 'offset' is where to start in the list,
+                    and 'limit' is the maximum number of items to return. If
+                    'limit' is not specified, 0, or > max_limit, we default
+                    to max_limit.
+    @kwarg max_limit: The maximum number of items to return from 'items'
+    """
+    try:
+        offset = int(request.GET.get('offset', 0))
+    except ValueError:
+        offset = 0
+
+    try:
+        limit = int(request.GET.get('limit', max_limit))
+    except ValueError:
+        limit = max_limit
+
+    limit = min(max_limit, limit or max_limit)
     range_end = offset + limit
     return items[offset:range_end]
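Because the rewritten limited() parses 'offset' and 'limit' itself and tolerates junk values, it can be exercised directly against synthetic requests (this assumes the webob package, which the OpenStack API code already depends on; Request.blank is standard webob API):

    from webob import Request

    items = range(100)
    limited(items, Request.blank('/items'))                       # all 100
    limited(items, Request.blank('/items?offset=90&limit=20'))    # last 10
    limited(items, Request.blank('/items?limit=junk'))            # ValueError -> max_limit
    limited(items, Request.blank('/items?limit=5'), max_limit=3)  # capped at 3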
@@ -64,6 +64,22 @@ def _translate_detail_keys(inst):
     inst_dict['status'] = power_mapping[inst_dict['status']]
     inst_dict['addresses'] = dict(public=[], private=[])
+
+    # grab single private fixed ip
+    try:
+        private_ip = inst['fixed_ip']['address']
+        if private_ip:
+            inst_dict['addresses']['private'].append(private_ip)
+    except KeyError:
+        LOG.debug(_("Failed to read private ip"))
+
+    # grab all public floating ips
+    try:
+        for floating in inst['fixed_ip']['floating_ips']:
+            inst_dict['addresses']['public'].append(floating['address'])
+    except KeyError:
+        LOG.debug(_("Failed to read public ip(s)"))
+
     inst_dict['metadata'] = {}
     inst_dict['hostId'] = ''
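The two try/except KeyError blocks keep a server listing from failing when an instance has no fixed ip joined in; a missing key anywhere in the chain just logs a debug line for that instance. The shape of the data the code expects, shown on a minimal fake record:

    inst = {'fixed_ip': {'address': '10.0.0.3',
                         'floating_ips': [{'address': '192.168.1.10'}]}}
    addresses = dict(public=[], private=[])
    try:
        addresses['private'].append(inst['fixed_ip']['address'])
    except KeyError:
        pass  # no fixed ip; the real code logs a debug message here
    try:
        for floating in inst['fixed_ip']['floating_ips']:
            addresses['public'].append(floating['address'])
    except KeyError:
        pass
    # addresses == {'public': ['192.168.1.10'], 'private': ['10.0.0.3']}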
@@ -163,7 +179,8 @@ class Controller(wsgi.Controller):
             display_name=env['server']['name'],
             display_description=env['server']['name'],
             key_name=key_pair['name'],
-            key_data=key_pair['public_key'])
+            key_data=key_pair['public_key'],
+            onset_files=env.get('onset_files', []))
         return _translate_keys(instances[0])

     def update(self, req, id):
@@ -249,6 +266,20 @@ class Controller(wsgi.Controller):
             return faults.Fault(exc.HTTPUnprocessableEntity())
         return exc.HTTPAccepted()

+    def reset_network(self, req, id):
+        """
+        Reset networking on an instance (admin only).
+
+        """
+        context = req.environ['nova.context']
+        try:
+            self.compute_api.reset_network(context, id)
+        except:
+            readable = traceback.format_exc()
+            LOG.exception(_("Compute.api::reset_network %s"), readable)
+            return faults.Fault(exc.HTTPUnprocessableEntity())
+        return exc.HTTPAccepted()
+
     def pause(self, req, id):
         """ Permit Admins to Pause the server. """
         ctxt = req.environ['nova.context']
@@ -74,6 +74,25 @@ LOG = logging.getLogger("nova.ldapdriver")
 # in which we may want to change the interface a bit more.


+def _clean(attr):
+    """Clean attr for insertion into ldap"""
+    if attr is None:
+        return None
+    if type(attr) is unicode:
+        return str(attr)
+    return attr
+
+
+def sanitize(fn):
+    """Decorator to sanitize all args"""
+    def _wrapped(self, *args, **kwargs):
+        args = [_clean(x) for x in args]
+        kwargs = dict((k, _clean(v)) for (k, v) in kwargs)
+        return fn(self, *args, **kwargs)
+    _wrapped.func_name = fn.func_name
+    return _wrapped
+
+
 class LdapDriver(object):
     """Ldap Auth driver

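One caveat in the hunk above: iterating a dict directly yields only its keys, so the `for (k, v) in kwargs` generator would fail to unpack as soon as a keyword argument is actually passed. A sketch of the same decorator iterating key/value pairs explicitly (Python 2 idiom, matching the rest of the file):

    def sanitize(fn):
        """Decorator to sanitize all args"""
        def _wrapped(self, *args, **kwargs):
            args = [_clean(x) for x in args]
            # iteritems() yields (key, value) pairs; bare kwargs would
            # yield only the key strings and break the tuple unpacking.
            kwargs = dict((k, _clean(v)) for (k, v) in kwargs.iteritems())
            return fn(self, *args, **kwargs)
        _wrapped.func_name = fn.func_name
        return _wrapped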
@@ -106,23 +125,27 @@ class LdapDriver(object):
         self.conn.unbind_s()
         return False

+    @sanitize
     def get_user(self, uid):
         """Retrieve user by id"""
         attr = self.__get_ldap_user(uid)
         return self.__to_user(attr)

+    @sanitize
     def get_user_from_access_key(self, access):
         """Retrieve user by access key"""
         query = '(accessKey=%s)' % access
         dn = FLAGS.ldap_user_subtree
         return self.__to_user(self.__find_object(dn, query))

+    @sanitize
     def get_project(self, pid):
         """Retrieve project by id"""
         dn = self.__project_to_dn(pid)
         attr = self.__find_object(dn, LdapDriver.project_pattern)
         return self.__to_project(attr)

+    @sanitize
     def get_users(self):
         """Retrieve list of users"""
         attrs = self.__find_objects(FLAGS.ldap_user_subtree,
@@ -134,6 +157,7 @@ class LdapDriver(object):
             users.append(user)
         return users

+    @sanitize
     def get_projects(self, uid=None):
         """Retrieve list of projects"""
         pattern = LdapDriver.project_pattern
@@ -143,6 +167,7 @@ class LdapDriver(object):
                                     pattern)
         return [self.__to_project(attr) for attr in attrs]

+    @sanitize
     def create_user(self, name, access_key, secret_key, is_admin):
         """Create a user"""
         if self.__user_exists(name):
@@ -196,6 +221,7 @@ class LdapDriver(object):
         self.conn.add_s(self.__uid_to_dn(name), attr)
         return self.__to_user(dict(attr))

+    @sanitize
     def create_project(self, name, manager_uid,
                        description=None, member_uids=None):
         """Create a project"""
@@ -231,6 +257,7 @@ class LdapDriver(object):
         self.conn.add_s(dn, attr)
         return self.__to_project(dict(attr))

+    @sanitize
     def modify_project(self, project_id, manager_uid=None, description=None):
         """Modify an existing project"""
         if not manager_uid and not description:
@@ -249,21 +276,25 @@ class LdapDriver(object):
         dn = self.__project_to_dn(project_id)
         self.conn.modify_s(dn, attr)

+    @sanitize
     def add_to_project(self, uid, project_id):
         """Add user to project"""
         dn = self.__project_to_dn(project_id)
         return self.__add_to_group(uid, dn)

+    @sanitize
     def remove_from_project(self, uid, project_id):
         """Remove user from project"""
         dn = self.__project_to_dn(project_id)
         return self.__remove_from_group(uid, dn)

+    @sanitize
     def is_in_project(self, uid, project_id):
         """Check if user is in project"""
         dn = self.__project_to_dn(project_id)
         return self.__is_in_group(uid, dn)

+    @sanitize
     def has_role(self, uid, role, project_id=None):
         """Check if user has role

@@ -273,6 +304,7 @@ class LdapDriver(object):
         role_dn = self.__role_to_dn(role, project_id)
         return self.__is_in_group(uid, role_dn)

+    @sanitize
     def add_role(self, uid, role, project_id=None):
         """Add role for user (or user and project)"""
         role_dn = self.__role_to_dn(role, project_id)
@@ -283,11 +315,13 @@ class LdapDriver(object):
         else:
             return self.__add_to_group(uid, role_dn)

+    @sanitize
     def remove_role(self, uid, role, project_id=None):
         """Remove role for user (or user and project)"""
         role_dn = self.__role_to_dn(role, project_id)
         return self.__remove_from_group(uid, role_dn)

+    @sanitize
     def get_user_roles(self, uid, project_id=None):
         """Retrieve list of roles for user (or user and project)"""
         if project_id is None:
@@ -307,6 +341,7 @@ class LdapDriver(object):
         roles = self.__find_objects(project_dn, query)
         return [role['cn'][0] for role in roles]

+    @sanitize
     def delete_user(self, uid):
         """Delete a user"""
         if not self.__user_exists(uid):
@@ -332,12 +367,14 @@ class LdapDriver(object):
         # Delete entry
         self.conn.delete_s(self.__uid_to_dn(uid))

+    @sanitize
     def delete_project(self, project_id):
         """Delete a project"""
         project_dn = self.__project_to_dn(project_id)
         self.__delete_roles(project_dn)
         self.__delete_group(project_dn)

+    @sanitize
     def modify_user(self, uid, access_key=None, secret_key=None, admin=None):
         """Modify an existing user"""
         if not access_key and not secret_key and admin is None:
@@ -67,10 +67,10 @@ class API(base.Base):
         """Get the network topic for an instance."""
         try:
             instance = self.get(context, instance_id)
-        except exception.NotFound as e:
+        except exception.NotFound:
             LOG.warning(_("Instance %d was not found in get_network_topic"),
                         instance_id)
-            raise e
+            raise

         host = instance['host']
         if not host:
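Replacing `raise e` with a bare `raise` keeps the original traceback, so the warning is logged without making the error look like it originated in the handler. The difference in miniature (Python 2 semantics):

    def inner():
        raise KeyError('boom')

    try:
        inner()
    except KeyError:
        # log something useful, then...
        raise   # bare raise: the traceback still points into inner()
        # 'raise e' (with 'except KeyError as e') would restart the
        # traceback at this line instead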
@@ -85,10 +85,11 @@ class API(base.Base):
                min_count=1, max_count=1,
                display_name='', display_description='',
                key_name=None, key_data=None, security_group='default',
-               availability_zone=None, user_data=None):
+               availability_zone=None, user_data=None,
+               onset_files=None):
         """Create the number of instances requested if quota and
-        other arguments check out ok."""
+        other arguments check out ok.
+        """
         type_data = instance_types.INSTANCE_TYPES[instance_type]
         num_instances = quota.allowed_instances(context, max_count, type_data)
         if num_instances < min_count:
@@ -103,9 +104,9 @@ class API(base.Base):
         if not is_vpn:
             image = self.image_service.show(context, image_id)
             if kernel_id is None:
-                kernel_id = image.get('kernelId', None)
+                kernel_id = image.get('kernel_id', None)
             if ramdisk_id is None:
-                ramdisk_id = image.get('ramdiskId', None)
+                ramdisk_id = image.get('ramdisk_id', None)
             # No kernel and ramdisk for raw images
             if kernel_id == str(FLAGS.null_kernel):
                 kernel_id = None
@@ -156,7 +157,6 @@ class API(base.Base):
             'key_data': key_data,
             'locked': False,
             'availability_zone': availability_zone}
-
         elevated = context.elevated()
         instances = []
         LOG.debug(_("Going to run %s instances..."), num_instances)
@@ -193,7 +193,8 @@ class API(base.Base):
                      {"method": "run_instance",
                       "args": {"topic": FLAGS.compute_topic,
                                "instance_id": instance_id,
-                               "availability_zone": availability_zone}})
+                               "availability_zone": availability_zone,
+                               "onset_files": onset_files}})

         for group_id in security_groups:
             self.trigger_security_group_members_refresh(elevated, group_id)
@@ -293,10 +294,10 @@ class API(base.Base):
         LOG.debug(_("Going to try to terminate %s"), instance_id)
         try:
             instance = self.get(context, instance_id)
-        except exception.NotFound as e:
+        except exception.NotFound:
             LOG.warning(_("Instance %d was not found during terminate"),
                         instance_id)
-            raise e
+            raise

         if (instance['state_description'] == 'terminating'):
             LOG.warning(_("Instance %d is already being terminated"),
@@ -434,6 +435,10 @@ class API(base.Base):
         """Set the root/admin password for the given instance."""
         self._cast_compute_message('set_admin_password', context, instance_id)

+    def inject_file(self, context, instance_id):
+        """Write a file to the given instance."""
+        self._cast_compute_message('inject_file', context, instance_id)
+
     def get_ajax_console(self, context, instance_id):
         """Get a url to an AJAX Console"""
         instance = self.get(context, instance_id)
@@ -466,6 +471,13 @@ class API(base.Base):
         instance = self.get(context, instance_id)
         return instance['locked']

+    def reset_network(self, context, instance_id):
+        """
+        Reset networking on the instance.
+
+        """
+        self._cast_compute_message('reset_network', context, instance_id)
+
     def attach_volume(self, context, instance_id, volume_id, device):
         if not re.match("^/dev/[a-z]d[a-z]+$", device):
             raise exception.ApiError(_("Invalid device specified: %s. "
@@ -38,8 +38,8 @@ def get_by_type(instance_type):
     if instance_type is None:
         return FLAGS.default_instance_type
     if instance_type not in INSTANCE_TYPES:
-        raise exception.ApiError(_("Unknown instance type: %s"),
-                                 instance_type)
+        raise exception.ApiError(_("Unknown instance type: %s") % \
+            instance_type, "Invalid")
     return instance_type
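The old call passed the format string and the instance type as two separate arguments, logging-style; exceptions do not interpolate their arguments that way, so the message would surface as an unformatted tuple. Formatting with % before raising, and supplying "Invalid" as the error code, produces a complete message at raise time. In isolation, with a simplified stand-in for nova.exception.ApiError (the real class's constructor may differ):

    class ApiError(Exception):
        # simplified stand-in for nova.exception.ApiError
        def __init__(self, message, code='ApiError'):
            self.message, self.code = message, code
            Exception.__init__(self, '%s: %s' % (code, message))

    instance_type = 'm1.gigantic'
    try:
        raise ApiError("Unknown instance type: %s" % instance_type,
                       "Invalid")
    except ApiError as ex:
        print unicode(ex)   # Invalid: Unknown instance type: m1.gigantic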
@ -34,6 +34,7 @@ terminating it.
|
|||||||
:func:`nova.utils.import_object`
|
:func:`nova.utils.import_object`
|
||||||
"""
|
"""
|
||||||
|
|
||||||
|
import base64
|
||||||
import datetime
|
import datetime
|
||||||
import random
|
import random
|
||||||
import string
|
import string
|
||||||
@ -127,10 +128,10 @@ class ComputeManager(manager.Manager):
|
|||||||
info = self.driver.get_info(instance_ref['name'])
|
info = self.driver.get_info(instance_ref['name'])
|
||||||
state = info['state']
|
state = info['state']
|
||||||
except exception.NotFound:
|
except exception.NotFound:
|
||||||
state = power_state.NOSTATE
|
state = power_state.FAILED
|
||||||
self.db.instance_set_state(context, instance_id, state)
|
self.db.instance_set_state(context, instance_id, state)
|
||||||
|
|
||||||
def get_console_topic(self, context, **_kwargs):
|
def get_console_topic(self, context, **kwargs):
|
||||||
"""Retrieves the console host for a project on this host
|
"""Retrieves the console host for a project on this host
|
||||||
Currently this is just set in the flags for each compute
|
Currently this is just set in the flags for each compute
|
||||||
host."""
|
host."""
|
||||||
@ -139,7 +140,7 @@ class ComputeManager(manager.Manager):
|
|||||||
FLAGS.console_topic,
|
FLAGS.console_topic,
|
||||||
FLAGS.console_host)
|
FLAGS.console_host)
|
||||||
|
|
||||||
def get_network_topic(self, context, **_kwargs):
|
def get_network_topic(self, context, **kwargs):
|
||||||
"""Retrieves the network host for a project on this host"""
|
"""Retrieves the network host for a project on this host"""
|
||||||
# TODO(vish): This method should be memoized. This will make
|
# TODO(vish): This method should be memoized. This will make
|
||||||
# the call to get_network_host cheaper, so that
|
# the call to get_network_host cheaper, so that
|
||||||
@ -158,21 +159,22 @@ class ComputeManager(manager.Manager):
|
|||||||
|
|
||||||
@exception.wrap_exception
|
@exception.wrap_exception
|
||||||
def refresh_security_group_rules(self, context,
|
def refresh_security_group_rules(self, context,
|
||||||
security_group_id, **_kwargs):
|
security_group_id, **kwargs):
|
||||||
"""This call passes straight through to the virtualization driver."""
|
"""This call passes straight through to the virtualization driver."""
|
||||||
return self.driver.refresh_security_group_rules(security_group_id)
|
return self.driver.refresh_security_group_rules(security_group_id)
|
||||||
|
|
||||||
@exception.wrap_exception
|
@exception.wrap_exception
|
||||||
def refresh_security_group_members(self, context,
|
def refresh_security_group_members(self, context,
|
||||||
security_group_id, **_kwargs):
|
security_group_id, **kwargs):
|
||||||
"""This call passes straight through to the virtualization driver."""
|
"""This call passes straight through to the virtualization driver."""
|
||||||
return self.driver.refresh_security_group_members(security_group_id)
|
return self.driver.refresh_security_group_members(security_group_id)
|
||||||
|
|
||||||
@exception.wrap_exception
|
@exception.wrap_exception
|
||||||
def run_instance(self, context, instance_id, **_kwargs):
|
def run_instance(self, context, instance_id, **kwargs):
|
||||||
"""Launch a new instance with specified options."""
|
"""Launch a new instance with specified options."""
|
||||||
context = context.elevated()
|
context = context.elevated()
|
||||||
instance_ref = self.db.instance_get(context, instance_id)
|
instance_ref = self.db.instance_get(context, instance_id)
|
||||||
|
instance_ref.onset_files = kwargs.get('onset_files', [])
|
||||||
if instance_ref['name'] in self.driver.list_instances():
|
if instance_ref['name'] in self.driver.list_instances():
|
||||||
raise exception.Error(_("Instance has already been created"))
|
raise exception.Error(_("Instance has already been created"))
|
||||||
LOG.audit(_("instance %s: starting..."), instance_id,
|
LOG.audit(_("instance %s: starting..."), instance_id,
|
||||||
@@ -323,28 +325,43 @@ class ComputeManager(manager.Manager):
         """Set the root/admin password for an instance on this server."""
         context = context.elevated()
         instance_ref = self.db.instance_get(context, instance_id)
-        if instance_ref['state'] != power_state.RUNNING:
-            logging.warn('trying to reset the password on a non-running '
-                         'instance: %s (state: %s expected: %s)',
-                         instance_ref['id'],
-                         instance_ref['state'],
-                         power_state.RUNNING)
-        logging.debug('instance %s: setting admin password',
+        instance_id = instance_ref['id']
+        instance_state = instance_ref['state']
+        expected_state = power_state.RUNNING
+        if instance_state != expected_state:
+            LOG.warn(_('trying to reset the password on a non-running '
+                       'instance: %(instance_id)s (state: %(instance_state)s '
+                       'expected: %(expected_state)s)') % locals())
+        LOG.audit(_('instance %s: setting admin password'),
                   instance_ref['name'])
         if new_pass is None:
             # Generate a random password
-            new_pass = self._generate_password(FLAGS.password_length)
+            new_pass = utils.generate_password(FLAGS.password_length)

         self.driver.set_admin_password(instance_ref, new_pass)
         self._update_state(context, instance_id)

-    def _generate_password(self, length=20):
-        """Generate a random sequence of letters and digits
-        to be used as a password.
-        """
-        chrs = string.letters + string.digits
-        return "".join([random.choice(chrs) for i in xrange(length)])
+    @exception.wrap_exception
+    @checks_instance_lock
+    def inject_file(self, context, instance_id, path, file_contents):
+        """Write a file to the specified path on an instance on this server"""
+        context = context.elevated()
+        instance_ref = self.db.instance_get(context, instance_id)
+        instance_id = instance_ref['id']
+        instance_state = instance_ref['state']
+        expected_state = power_state.RUNNING
+        if instance_state != expected_state:
+            LOG.warn(_('trying to inject a file into a non-running '
+                       'instance: %(instance_id)s (state: %(instance_state)s '
+                       'expected: %(expected_state)s)') % locals())
+        # Files/paths *should* be base64-encoded at this point, but
+        # double-check to make sure.
+        b64_path = utils.ensure_b64_encoding(path)
+        b64_contents = utils.ensure_b64_encoding(file_contents)
+        plain_path = base64.b64decode(b64_path)
+        nm = instance_ref['name']
+        msg = _('instance %(nm)s: injecting file to %(plain_path)s') % locals()
+        LOG.audit(msg)
+        self.driver.inject_file(instance_ref, b64_path, b64_contents)

     @exception.wrap_exception
     @checks_instance_lock

@@ -498,6 +515,18 @@ class ComputeManager(manager.Manager):
         instance_ref = self.db.instance_get(context, instance_id)
         return instance_ref['locked']

+    @checks_instance_lock
+    def reset_network(self, context, instance_id):
+        """
+        Reset networking on the instance.
+
+        """
+        context = context.elevated()
+        instance_ref = self.db.instance_get(context, instance_id)
+        LOG.debug(_('instance %s: reset network'), instance_id,
+                  context=context)
+        self.driver.reset_network(instance_ref)
+
     @exception.wrap_exception
     def get_console_output(self, context, instance_id):
         """Send the console output for an instance."""
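Both set_admin_password and the new inject_file follow the same defensive pattern: pull the instance id and state into named locals, compare against an expected power state, and build the warning with `% locals()` so each value is interpolated by name. A minimal standalone sketch of that pattern (names are illustrative, not Nova's):

    RUNNING, SHUTDOWN = 0x01, 0x04


    def warn_if_not_running(instance):
        """Return a warning string when an instance is not running."""
        instance_id = instance['id']
        instance_state = instance['state']
        expected_state = RUNNING
        if instance_state != expected_state:
            # locals() exposes the three variables above to the format string
            return ('trying to operate on a non-running instance: '
                    '%(instance_id)s (state: %(instance_state)s '
                    'expected: %(expected_state)s)' % locals())
        return None


    print(warn_if_not_running({'id': 42, 'state': SHUTDOWN}))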
@@ -511,7 +540,7 @@ class ComputeManager(manager.Manager):
     def get_ajax_console(self, context, instance_id):
         """Return connection information for an ajax console"""
         context = context.elevated()
-        logging.debug(_("instance %s: getting ajax console"), instance_id)
+        LOG.debug(_("instance %s: getting ajax console"), instance_id)
         instance_ref = self.db.instance_get(context, instance_id)

         return self.driver.get_ajax_console(instance_ref)

@@ -27,6 +27,7 @@ SHUTDOWN = 0x04
 SHUTOFF = 0x05
 CRASHED = 0x06
 SUSPENDED = 0x07
+FAILED = 0x08


 def name(code):

@@ -38,5 +39,6 @@ def name(code):
          SHUTDOWN: 'shutdown',
          SHUTOFF: 'shutdown',
          CRASHED: 'crashed',
-         SUSPENDED: 'suspended'}
+         SUSPENDED: 'suspended',
+         FAILED: 'failed to spawn'}
     return d[code]
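The new FAILED code is only useful because name() maps every power-state integer to a label; the 'failed to spawn' entry keeps that dict total. A self-contained sketch using just the constants visible in this hunk:

    SHUTDOWN, SHUTOFF, CRASHED, SUSPENDED, FAILED = 0x04, 0x05, 0x06, 0x07, 0x08


    def name(code):
        """Map a power-state code to a human-readable label."""
        d = {SHUTDOWN: 'shutdown',
             SHUTOFF: 'shutdown',
             CRASHED: 'crashed',
             SUSPENDED: 'suspended',
             FAILED: 'failed to spawn'}
        return d[code]


    assert name(FAILED) == 'failed to spawn'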
@@ -28,7 +28,6 @@ from nova import utils

-
 class RequestContext(object):

     def __init__(self, user, project, is_admin=None, read_deleted=False,
                  remote_address=None, timestamp=None, request_id=None):
         if hasattr(user, 'id'):

@@ -53,7 +52,7 @@ class RequestContext(object):
         self.read_deleted = read_deleted
         self.remote_address = remote_address
         if not timestamp:
-            timestamp = datetime.datetime.utcnow()
+            timestamp = utils.utcnow()
         if isinstance(timestamp, str) or isinstance(timestamp, unicode):
             timestamp = utils.parse_isotime(timestamp)
         self.timestamp = timestamp

@@ -101,7 +100,7 @@ class RequestContext(object):
         return cls(**values)

     def elevated(self, read_deleted=False):
-        """Return a version of this context with admin flag set"""
+        """Return a version of this context with admin flag set."""
         return RequestContext(self.user_id,
                               self.project_id,
                               True,
@@ -288,11 +288,21 @@ def fixed_ip_disassociate_all_by_timeout(context, host, time):
     return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)


+def fixed_ip_get_all(context):
+    """Get all defined fixed ips."""
+    return IMPL.fixed_ip_get_all(context)
+
+
 def fixed_ip_get_by_address(context, address):
     """Get a fixed ip by address or raise if it does not exist."""
     return IMPL.fixed_ip_get_by_address(context, address)


+def fixed_ip_get_all_by_instance(context, instance_id):
+    """Get fixed ips by instance or raise if none exist."""
+    return IMPL.fixed_ip_get_all_by_instance(context, instance_id)
+
+
 def fixed_ip_get_instance(context, address):
     """Get an instance for a fixed ip by address."""
     return IMPL.fixed_ip_get_instance(context, address)

@@ -500,6 +510,11 @@ def network_get(context, network_id):
     return IMPL.network_get(context, network_id)


+def network_get_all(context):
+    """Return all defined networks."""
+    return IMPL.network_get_all(context)
+
+
 # pylint: disable-msg=C0103
 def network_get_associated_fixed_ips(context, network_id):
     """Get all network's ips that have been associated."""

@@ -516,6 +531,11 @@ def network_get_by_instance(context, instance_id):
     return IMPL.network_get_by_instance(context, instance_id)


+def network_get_all_by_instance(context, instance_id):
+    """Get all networks by instance id or raise if none exist."""
+    return IMPL.network_get_all_by_instance(context, instance_id)
+
+
 def network_get_index(context, network_id):
     """Get non-conflicting index for network."""
     return IMPL.network_get_index(context, network_id)
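Every new function in nova/db/api.py is a one-line wrapper that forwards to a backend module bound to IMPL (resolved at import time; Nova uses a lazy-pluggable utility for this). A minimal sketch of that dispatch pattern with a stub backend standing in for the SQLAlchemy implementation:

    class _FakeBackend(object):
        """Duck-typed stand-in for the real SQLAlchemy backend module."""

        def fixed_ip_get_all(self, context):
            return ['10.0.0.2', '10.0.0.3']


    IMPL = _FakeBackend()  # hypothetical; Nova resolves this dynamically


    def fixed_ip_get_all(context):
        """Get all defined fixed ips."""
        return IMPL.fixed_ip_get_all(context)


    print(fixed_ip_get_all(context=None))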
@@ -19,6 +19,7 @@
 Implementation of SQLAlchemy backend.
 """

+import datetime
 import warnings

 from nova import db

@@ -578,10 +579,21 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time):
                             'AND instance_id IS NOT NULL '
                             'AND allocated = 0',
                             {'host': host,
-                             'time': time.isoformat()})
+                             'time': time})
     return result.rowcount


+@require_admin_context
+def fixed_ip_get_all(context, session=None):
+    if not session:
+        session = get_session()
+    result = session.query(models.FixedIp).all()
+    if not result:
+        raise exception.NotFound(_('No fixed ips defined'))
+
+    return result
+
+
 @require_context
 def fixed_ip_get_by_address(context, address, session=None):
     if not session:

@@ -607,6 +619,17 @@ def fixed_ip_get_instance(context, address):
     return fixed_ip_ref.instance


+@require_context
+def fixed_ip_get_all_by_instance(context, instance_id):
+    session = get_session()
+    rv = session.query(models.FixedIp).\
+            filter_by(instance_id=instance_id).\
+            filter_by(deleted=False)
+    if not rv:
+        raise exception.NotFound(_('No address for instance %s') % instance_id)
+    return rv
+
+
 @require_context
 def fixed_ip_get_instance_v6(context, address):
     session = get_session()

@@ -670,8 +693,14 @@ def instance_data_get_for_project(context, project_id):
 def instance_destroy(context, instance_id):
     session = get_session()
     with session.begin():
-        instance_ref = instance_get(context, instance_id, session=session)
-        instance_ref.delete(session=session)
+        session.execute('update instances set deleted=1,'
+                        'deleted_at=:at where id=:id',
+                        {'id': instance_id,
+                         'at': datetime.datetime.utcnow()})
+        session.execute('update security_group_instance_association '
+                        'set deleted=1,deleted_at=:at where instance_id=:id',
+                        {'id': instance_id,
+                         'at': datetime.datetime.utcnow()})


 @require_context
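instance_destroy switches from ORM deletion to explicit UPDATE statements, so the instance row and its security-group associations are soft-deleted together inside one transaction. A sketch of the same idea against a plain SQLite database (sqlite3 here rather than SQLAlchemy, just to show the statements):

    import datetime
    import sqlite3

    conn = sqlite3.connect(':memory:')
    conn.execute('create table instances '
                 '(id integer, deleted integer, deleted_at text)')
    conn.execute('insert into instances values (1, 0, NULL)')

    # Soft-delete: mark the row instead of removing it, so history survives.
    now = datetime.datetime.utcnow().isoformat()
    conn.execute('update instances set deleted=1, deleted_at=:at where id=:id',
                 {'id': 1, 'at': now})
    print(conn.execute('select deleted, deleted_at from instances').fetchone())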
@@ -712,6 +741,7 @@ def instance_get_all(context):
     return session.query(models.Instance).\
                    options(joinedload_all('fixed_ip.floating_ips')).\
                    options(joinedload('security_groups')).\
+                   options(joinedload_all('fixed_ip.network')).\
                    filter_by(deleted=can_read_deleted(context)).\
                    all()

@@ -722,6 +752,7 @@ def instance_get_all_by_user(context, user_id):
     return session.query(models.Instance).\
                    options(joinedload_all('fixed_ip.floating_ips')).\
                    options(joinedload('security_groups')).\
+                   options(joinedload_all('fixed_ip.network')).\
                    filter_by(deleted=can_read_deleted(context)).\
                    filter_by(user_id=user_id).\
                    all()

@@ -733,6 +764,7 @@ def instance_get_all_by_host(context, host):
     return session.query(models.Instance).\
                    options(joinedload_all('fixed_ip.floating_ips')).\
                    options(joinedload('security_groups')).\
+                   options(joinedload_all('fixed_ip.network')).\
                    filter_by(host=host).\
                    filter_by(deleted=can_read_deleted(context)).\
                    all()

@@ -746,6 +778,7 @@ def instance_get_all_by_project(context, project_id):
     return session.query(models.Instance).\
                    options(joinedload_all('fixed_ip.floating_ips')).\
                    options(joinedload('security_groups')).\
+                   options(joinedload_all('fixed_ip.network')).\
                    filter_by(project_id=project_id).\
                    filter_by(deleted=can_read_deleted(context)).\
                    all()

@@ -759,6 +792,7 @@ def instance_get_all_by_reservation(context, reservation_id):
     return session.query(models.Instance).\
                    options(joinedload_all('fixed_ip.floating_ips')).\
                    options(joinedload('security_groups')).\
+                   options(joinedload_all('fixed_ip.network')).\
                    filter_by(reservation_id=reservation_id).\
                    filter_by(deleted=can_read_deleted(context)).\
                    all()

@@ -766,6 +800,7 @@ def instance_get_all_by_reservation(context, reservation_id):
     return session.query(models.Instance).\
                    options(joinedload_all('fixed_ip.floating_ips')).\
                    options(joinedload('security_groups')).\
+                   options(joinedload_all('fixed_ip.network')).\
                    filter_by(project_id=context.project_id).\
                    filter_by(reservation_id=reservation_id).\
                    filter_by(deleted=False).\
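Every listing query gains options(joinedload_all('fixed_ip.network')), which makes SQLAlchemy fetch the network row in the same SELECT instead of issuing a lazy query per instance. A runnable sketch of the same eager-loading idea with a recent SQLAlchemy, where the chain is spelled joinedload(...).joinedload(...); the model classes below are minimal stand-ins, not Nova's:

    from sqlalchemy import Column, ForeignKey, Integer, String, create_engine
    from sqlalchemy.orm import (declarative_base, joinedload, relationship,
                                sessionmaker)

    Base = declarative_base()


    class Network(Base):
        __tablename__ = 'networks'
        id = Column(Integer, primary_key=True)
        label = Column(String(255))


    class FixedIp(Base):
        __tablename__ = 'fixed_ips'
        id = Column(Integer, primary_key=True)
        address = Column(String(255))
        network_id = Column(Integer, ForeignKey('networks.id'))
        network = relationship(Network)


    class Instance(Base):
        __tablename__ = 'instances'
        id = Column(Integer, primary_key=True)
        fixed_ip_id = Column(Integer, ForeignKey('fixed_ips.id'))
        fixed_ip = relationship(FixedIp)


    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()
    session.add(Instance(fixed_ip=FixedIp(address='10.0.0.2',
                                          network=Network(label='private'))))
    session.commit()

    # One SELECT with joins instead of an extra lazy query per instance:
    instances = session.query(Instance).\
        options(joinedload(Instance.fixed_ip).joinedload(FixedIp.network)).\
        all()
    print(instances[0].fixed_ip.network.label)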
@@ -1043,6 +1078,15 @@ def network_get(context, network_id, session=None):
     return result


+@require_admin_context
+def network_get_all(context):
+    session = get_session()
+    result = session.query(models.Network)
+    if not result:
+        raise exception.NotFound(_('No networks defined'))
+    return result
+
+
 # NOTE(vish): pylint complains because of the long method name, but
 #             it fits with the names of the rest of the methods
 # pylint: disable-msg=C0103

@@ -1086,6 +1130,19 @@ def network_get_by_instance(_context, instance_id):
     return rv


+@require_admin_context
+def network_get_all_by_instance(_context, instance_id):
+    session = get_session()
+    rv = session.query(models.Network).\
+             filter_by(deleted=False).\
+             join(models.Network.fixed_ips).\
+             filter_by(instance_id=instance_id).\
+             filter_by(deleted=False)
+    if not rv:
+        raise exception.NotFound(_('No network for instance %s') % instance_id)
+    return rv
+
+
 @require_admin_context
 def network_set_host(context, network_id, host_id):
     session = get_session()

@@ -1583,6 +1640,11 @@ def security_group_destroy(context, security_group_id):
         # TODO(vish): do we have to use sql here?
         session.execute('update security_groups set deleted=1 where id=:id',
                         {'id': security_group_id})
+        session.execute('update security_group_instance_association '
+                        'set deleted=1,deleted_at=:at '
+                        'where security_group_id=:id',
+                        {'id': security_group_id,
+                         'at': datetime.datetime.utcnow()})
         session.execute('update security_group_rules set deleted=1 '
                         'where group_id=:id',
                         {'id': security_group_id})
@@ -134,6 +134,9 @@ instances = Table('instances', meta,
        Column('ramdisk_id',
               String(length=255, convert_unicode=False, assert_unicode=None,
                      unicode_error=None, _warn_on_bytestring=False)),
+       Column('server_name',
+              String(length=255, convert_unicode=False, assert_unicode=None,
+                     unicode_error=None, _warn_on_bytestring=False)),
        Column('launch_index', Integer()),
        Column('key_name',
               String(length=255, convert_unicode=False, assert_unicode=None,

@@ -178,23 +181,6 @@ instances = Table('instances', meta,
        )


-iscsi_targets = Table('iscsi_targets', meta,
-       Column('created_at', DateTime(timezone=False)),
-       Column('updated_at', DateTime(timezone=False)),
-       Column('deleted_at', DateTime(timezone=False)),
-       Column('deleted', Boolean(create_constraint=True, name=None)),
-       Column('id', Integer(), primary_key=True, nullable=False),
-       Column('target_num', Integer()),
-       Column('host',
-              String(length=255, convert_unicode=False, assert_unicode=None,
-                     unicode_error=None, _warn_on_bytestring=False)),
-       Column('volume_id',
-              Integer(),
-              ForeignKey('volumes.id'),
-              nullable=True),
-       )
-
-
 key_pairs = Table('key_pairs', meta,
        Column('created_at', DateTime(timezone=False)),
        Column('updated_at', DateTime(timezone=False)),

@@ -522,24 +508,26 @@ def upgrade(migrate_engine):
     # bind migrate_engine to your metadata
     meta.bind = migrate_engine

-    for table in (auth_tokens, export_devices, fixed_ips, floating_ips,
-                  instances, iscsi_targets, key_pairs, networks,
-                  projects, quotas, security_groups, security_group_inst_assoc,
-                  security_group_rules, services, users,
+    tables = [auth_tokens,
+              instances, key_pairs, networks, fixed_ips, floating_ips,
+              quotas, security_groups, security_group_inst_assoc,
+              security_group_rules, services, users, projects,
               user_project_association, user_project_role_association,
-                  user_role_association, volumes):
+              user_role_association, volumes, export_devices]
+    for table in tables:
         try:
             table.create()
         except Exception:
             logging.info(repr(table))
             logging.exception('Exception while creating table')
+            meta.drop_all(tables=tables)
             raise


 def downgrade(migrate_engine):
     # Operations to reverse the above upgrade go here.
     for table in (auth_tokens, export_devices, fixed_ips, floating_ips,
-                  instances, iscsi_targets, key_pairs, networks,
+                  instances, key_pairs, networks,
                   projects, quotas, security_groups, security_group_inst_assoc,
                   security_group_rules, services, users,
                   user_project_association, user_project_role_association,

@@ -41,6 +41,10 @@ networks = Table('networks', meta,
        Column('id', Integer(), primary_key=True, nullable=False),
        )

+volumes = Table('volumes', meta,
+       Column('id', Integer(), primary_key=True, nullable=False),
+       )
+

 #
 # New Tables

@@ -131,6 +135,23 @@ instance_actions = Table('instance_actions', meta,
        )


+iscsi_targets = Table('iscsi_targets', meta,
+       Column('created_at', DateTime(timezone=False)),
+       Column('updated_at', DateTime(timezone=False)),
+       Column('deleted_at', DateTime(timezone=False)),
+       Column('deleted', Boolean(create_constraint=True, name=None)),
+       Column('id', Integer(), primary_key=True, nullable=False),
+       Column('target_num', Integer()),
+       Column('host',
+              String(length=255, convert_unicode=False, assert_unicode=None,
+                     unicode_error=None, _warn_on_bytestring=False)),
+       Column('volume_id',
+              Integer(),
+              ForeignKey('volumes.id'),
+              nullable=True),
+       )
+
+
 #
 # Tables to alter
 #

@@ -188,12 +209,16 @@ def upgrade(migrate_engine):
     # Upgrade operations go here. Don't create your own engine;
     # bind migrate_engine to your metadata
     meta.bind = migrate_engine
-    for table in (certificates, consoles, console_pools, instance_actions):
+
+    tables = [certificates, console_pools, consoles, instance_actions,
+              iscsi_targets]
+    for table in tables:
         try:
             table.create()
         except Exception:
             logging.info(repr(table))
             logging.exception('Exception while creating table')
+            meta.drop_all(tables=tables)
             raise

     auth_tokens.c.user_id.alter(type=String(length=255,

@@ -0,0 +1,51 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2011 OpenStack LLC
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from sqlalchemy import *
+from migrate import *
+
+from nova import log as logging
+
+
+meta = MetaData()
+
+
+networks = Table('networks', meta,
+       Column('id', Integer(), primary_key=True, nullable=False),
+       )
+
+
+#
+# New Tables
+#
+
+
+#
+# Tables to alter
+#
+
+networks_label = Column(
+        'label',
+        String(length=255, convert_unicode=False, assert_unicode=None,
+               unicode_error=None, _warn_on_bytestring=False))
+
+
+def upgrade(migrate_engine):
+    # Upgrade operations go here. Don't create your own engine;
+    # bind migrate_engine to your metadata
+    meta.bind = migrate_engine
+    networks.create_column(networks_label)
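Both migration upgrade paths now collect their tables in a list, so a failed create can undo everything with meta.drop_all(tables=tables) before re-raising. A condensed, runnable sketch of that create-or-rollback loop using duck-typed fakes in place of sqlalchemy Table objects:

    import logging


    class FakeTable(object):
        """Duck-typed stand-in for a sqlalchemy Table."""

        def __init__(self, name, fail=False):
            self.name, self.fail = name, fail
            self.created = False

        def create(self):
            if self.fail:
                raise RuntimeError('create failed for %s' % self.name)
            self.created = True


    class FakeMeta(object):
        def drop_all(self, tables):
            for t in tables:
                t.created = False  # undo anything partially created


    def upgrade(meta, tables):
        """Create each table; on failure drop them all and re-raise."""
        for table in tables:
            try:
                table.create()
            except Exception:
                logging.exception('Exception while creating table')
                meta.drop_all(tables=tables)
                raise


    tables = [FakeTable('a'), FakeTable('b', fail=True)]
    try:
        upgrade(FakeMeta(), tables)
    except RuntimeError:
        assert not any(t.created for t in tables)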
@@ -17,12 +17,22 @@
 # under the License.

 import os
+import sys

 from nova import flags

 import sqlalchemy
 from migrate.versioning import api as versioning_api
-from migrate.versioning import exceptions as versioning_exceptions
+
+try:
+    from migrate.versioning import exceptions as versioning_exceptions
+except ImportError:
+    try:
+        # python-migration changed location of exceptions after 1.6.3
+        # See LP Bug #717467
+        from migrate import exceptions as versioning_exceptions
+    except ImportError:
+        sys.exit(_("python-migrate is not installed. Exiting."))

 FLAGS = flags.FLAGS

@@ -46,12 +56,15 @@ def db_version():
     meta.reflect(bind=engine)
     try:
         for table in ('auth_tokens', 'export_devices', 'fixed_ips',
-                      'floating_ips', 'instances', 'iscsi_targets',
+                      'floating_ips', 'instances',
                       'key_pairs', 'networks', 'projects', 'quotas',
-                      'security_group_rules',
-                      'security_group_instance_association', 'services',
+                      'security_group_instance_association',
+                      'security_group_rules', 'security_groups',
+                      'services',
                       'users', 'user_project_association',
-                      'user_project_role_association', 'volumes'):
+                      'user_project_role_association',
+                      'user_role_association',
+                      'volumes'):
             assert table in meta.tables
         return db_version_control(1)
     except AssertionError:
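The nested try/except import above is the standard way to track a module that moved between library releases (here, sqlalchemy-migrate's exceptions module after 1.6.3). The same shape, shown with a stdlib pair so it runs anywhere:

    import sys

    # Prefer the external simplejson, fall back to the stdlib json,
    # else bail out with a clear message. Module names here are just
    # an analogous example, not the ones from the hunk.
    try:
        import simplejson as json
    except ImportError:
        try:
            import json
        except ImportError:
            sys.exit("no JSON module available. Exiting.")

    print(json.dumps({'ok': True}))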
@@ -311,10 +311,14 @@ class SecurityGroup(BASE, NovaBase):
                           secondary="security_group_instance_association",
                           primaryjoin='and_('
                               'SecurityGroup.id == '
                               'SecurityGroupInstanceAssociation.security_group_id,'
+                              'SecurityGroupInstanceAssociation.deleted == False,'
                               'SecurityGroup.deleted == False)',
                           secondaryjoin='and_('
                               'SecurityGroupInstanceAssociation.instance_id == Instance.id,'
+                              # (anthony) the condition below shouldn't be necessary now that the
+                              # association is being marked as deleted.  However, removing this
+                              # may cause existing deployments to choke, so I'm leaving it
                               'Instance.deleted == False)',
                           backref='security_groups')

@@ -369,6 +373,7 @@ class Network(BASE, NovaBase):
                                       "vpn_public_port"),
                      {'mysql_engine': 'InnoDB'})
     id = Column(Integer, primary_key=True)
+    label = Column(String(255))

     injected = Column(Boolean, default=False)
     cidr = Column(String(255), unique=True)

@@ -20,6 +20,7 @@ Session Handling for SQLAlchemy backend
 """

 from sqlalchemy import create_engine
+from sqlalchemy import pool
 from sqlalchemy.orm import sessionmaker

 from nova import exception

@@ -37,9 +38,14 @@ def get_session(autocommit=True, expire_on_commit=False):
     global _MAKER
     if not _MAKER:
         if not _ENGINE:
+            kwargs = {'pool_recycle': FLAGS.sql_idle_timeout,
+                      'echo': False}
+
+            if FLAGS.sql_connection.startswith('sqlite'):
+                kwargs['poolclass'] = pool.NullPool
+
             _ENGINE = create_engine(FLAGS.sql_connection,
-                                    pool_recycle=FLAGS.sql_idle_timeout,
-                                    echo=False)
+                                    **kwargs)
         _MAKER = (sessionmaker(bind=_ENGINE,
                                autocommit=autocommit,
                                expire_on_commit=expire_on_commit))
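get_session now builds the create_engine keyword arguments in a dict so SQLite connections can opt out of pooling (NullPool opens and closes a fresh connection per checkout) while other databases keep pool_recycle. A runnable sketch of the same branching, assuming a recent SQLAlchemy (hence the text() wrapper for raw SQL):

    from sqlalchemy import create_engine, pool, text

    sql_connection = 'sqlite://'   # stand-in for FLAGS.sql_connection
    sql_idle_timeout = 3600        # stand-in for FLAGS.sql_idle_timeout

    kwargs = {'pool_recycle': sql_idle_timeout,
              'echo': False}
    if sql_connection.startswith('sqlite'):
        # Don't reuse SQLite connections; hand back a fresh one each time.
        kwargs['poolclass'] = pool.NullPool

    engine = create_engine(sql_connection, **kwargs)
    print(engine.connect().execute(text('select 1')).scalar())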
@@ -208,7 +208,7 @@ def _get_my_ip():
         (addr, port) = csock.getsockname()
         csock.close()
         return addr
-    except socket.gaierror as ex:
+    except socket.error as ex:
         return "127.0.0.1"


@@ -282,12 +282,14 @@ DEFINE_integer('auth_token_ttl', 3600, 'Seconds for auth tokens to linger')

 DEFINE_string('state_path', os.path.join(os.path.dirname(__file__), '../'),
               "Top-level directory for maintaining nova's state")
+DEFINE_string('logdir', None, 'output to a per-service log file in named '
+                              'directory')

 DEFINE_string('sql_connection',
               'sqlite:///$state_path/nova.sqlite',
               'connection string for sql database')
-DEFINE_string('sql_idle_timeout',
-              '3600',
+DEFINE_integer('sql_idle_timeout',
+               3600,
               'timeout for idle sql database connections')
 DEFINE_integer('sql_max_retries', 12, 'sql connection attempts')
 DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval')

@@ -36,6 +36,22 @@ from nova.image import service
 FLAGS = flags.FLAGS


+def map_s3_to_base(image):
+    """Convert from S3 format to format defined by BaseImageService."""
+    i = {}
+    i['id'] = image.get('imageId')
+    i['name'] = image.get('imageId')
+    i['kernel_id'] = image.get('kernelId')
+    i['ramdisk_id'] = image.get('ramdiskId')
+    i['location'] = image.get('imageLocation')
+    i['owner_id'] = image.get('imageOwnerId')
+    i['status'] = image.get('imageState')
+    i['type'] = image.get('type')
+    i['is_public'] = image.get('isPublic')
+    i['architecture'] = image.get('architecture')
+    return i
+
+
 class S3ImageService(service.BaseImageService):

     def modify(self, context, image_id, operation):

@@ -65,26 +81,20 @@ class S3ImageService(service.BaseImageService):
                                              'image_id': image_id}))
         return image_id

-    def _fix_image_id(self, images):
-        """S3 has imageId but OpenStack wants id"""
-        for image in images:
-            if 'imageId' in image:
-                image['id'] = image['imageId']
-        return images
-
     def index(self, context):
         """Return a list of all images that a user can see."""
         response = self._conn(context).make_request(
             method='GET',
             bucket='_images')
-        return self._fix_image_id(json.loads(response.read()))
+        images = json.loads(response.read())
+        return [map_s3_to_base(i) for i in images]

     def show(self, context, image_id):
         """return a image object if the context has permissions"""
         if FLAGS.connection_type == 'fake':
             return {'imageId': 'bar'}
         result = self.index(context)
-        result = [i for i in result if i['imageId'] == image_id]
+        result = [i for i in result if i['id'] == image_id]
         if not result:
             raise exception.NotFound(_('Image %s could not be found')
                                      % image_id)
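map_s3_to_base replaces the old _fix_image_id helper: instead of patching an 'id' key into the raw S3 dict, every S3 field is renamed into the BaseImageService schema. The function is pure, so its behavior is easy to pin down; this is the mapping copied from the hunk, exercised standalone:

    def map_s3_to_base(image):
        """Convert from S3 format to format defined by BaseImageService."""
        i = {}
        i['id'] = image.get('imageId')
        i['name'] = image.get('imageId')
        i['kernel_id'] = image.get('kernelId')
        i['ramdisk_id'] = image.get('ramdiskId')
        i['location'] = image.get('imageLocation')
        i['owner_id'] = image.get('imageOwnerId')
        i['status'] = image.get('imageState')
        i['type'] = image.get('type')
        i['is_public'] = image.get('isPublic')
        i['architecture'] = image.get('architecture')
        return i


    s3_image = {'imageId': 'ami-42', 'imageState': 'available'}
    base = map_s3_to_base(s3_image)
    assert base['id'] == 'ami-42' and base['status'] == 'available'
    assert base['kernel_id'] is None  # absent S3 keys map to None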
28
nova/log.py

@@ -28,9 +28,12 @@ It also allows setting of formatting information through flags.


 import cStringIO
+import inspect
 import json
 import logging
 import logging.handlers
+import os
+import sys
 import traceback

 from nova import flags

@@ -91,7 +94,7 @@ critical = logging.critical
 log = logging.log
 # handlers
 StreamHandler = logging.StreamHandler
-FileHandler = logging.FileHandler
+RotatingFileHandler = logging.handlers.RotatingFileHandler
 # logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler.
 SysLogHandler = logging.handlers.SysLogHandler


@@ -110,6 +113,18 @@ def _dictify_context(context):
     return context


+def _get_binary_name():
+    return os.path.basename(inspect.stack()[-1][1])
+
+
+def get_log_file_path(binary=None):
+    if FLAGS.logfile:
+        return FLAGS.logfile
+    if FLAGS.logdir:
+        binary = binary or _get_binary_name()
+        return '%s.log' % (os.path.join(FLAGS.logdir, binary),)
+
+
 def basicConfig():
     logging.basicConfig()
     for handler in logging.root.handlers:

@@ -122,8 +137,9 @@ def basicConfig():
         syslog = SysLogHandler(address='/dev/log')
         syslog.setFormatter(_formatter)
         logging.root.addHandler(syslog)
-    if FLAGS.logfile:
-        logfile = FileHandler(FLAGS.logfile)
+    logpath = get_log_file_path()
+    if logpath:
+        logfile = RotatingFileHandler(logpath)
         logfile.setFormatter(_formatter)
         logging.root.addHandler(logfile)

@@ -191,6 +207,12 @@ class NovaLogger(logging.Logger):
             kwargs.pop('exc_info')
         self.error(message, **kwargs)


+def handle_exception(type, value, tb):
+    logging.root.critical(str(value), exc_info=(type, value, tb))
+
+
+sys.excepthook = handle_exception
 logging.setLoggerClass(NovaLogger)
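The new log-path logic prefers an explicit --logfile, then derives a per-service file name from --logdir plus the running binary's name, and otherwise installs no file handler at all. The same decision, reimplemented with plain parameters instead of FLAGS (parameter names and the sample binary name are illustrative):

    import os


    def get_log_file_path(logfile=None, logdir=None, binary='nova-compute'):
        """Pick a log destination: explicit file first, then per-service dir."""
        if logfile:
            return logfile
        if logdir:
            return '%s.log' % os.path.join(logdir, binary)
        return None  # caller skips adding a file handler


    assert get_log_file_path(logfile='/tmp/all.log') == '/tmp/all.log'
    assert (get_log_file_path(logdir='/var/log/nova')
            == '/var/log/nova/nova-compute.log')
    assert get_log_file_path() is None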
@@ -20,6 +20,7 @@ Implements vlans, bridges, and iptables rules using linux utilities.
 import os

 from nova import db
+from nova import exception
 from nova import flags
 from nova import log as logging
 from nova import utils

@@ -37,10 +38,13 @@ FLAGS = flags.FLAGS
 flags.DEFINE_string('dhcpbridge_flagfile',
                     '/etc/nova/nova-dhcpbridge.conf',
                     'location of flagfile for dhcpbridge')
+flags.DEFINE_string('dhcp_domain',
+                    'novalocal',
+                    'domain to use for building the hostnames')

 flags.DEFINE_string('networks_path', '$state_path/networks',
                     'Location to keep network config files')
-flags.DEFINE_string('public_interface', 'vlan1',
+flags.DEFINE_string('public_interface', 'eth0',
                     'Interface for public IP addresses')
 flags.DEFINE_string('vlan_interface', 'eth0',
                     'network device for vlans')

@@ -50,6 +54,8 @@ flags.DEFINE_string('routing_source_ip', '$my_ip',
                     'Public IP of network host')
 flags.DEFINE_bool('use_nova_chains', False,
                   'use the nova_ routing chains instead of default')
+flags.DEFINE_string('input_chain', 'INPUT',
+                    'chain to add nova_input to')

 flags.DEFINE_string('dns_server', None,
                     'if set, uses specific dns server for dnsmasq')

@@ -152,6 +158,8 @@ def ensure_floating_forward(floating_ip, fixed_ip):
     """Ensure floating ip forwarding rule"""
     _confirm_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
                   % (floating_ip, fixed_ip))
+    _confirm_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
+                  % (floating_ip, fixed_ip))
     _confirm_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
                   % (fixed_ip, floating_ip))


@@ -160,6 +168,8 @@ def remove_floating_forward(floating_ip, fixed_ip):
     """Remove forwarding for floating ip"""
     _remove_rule("PREROUTING", "-t nat -d %s -j DNAT --to %s"
                  % (floating_ip, fixed_ip))
+    _remove_rule("OUTPUT", "-t nat -d %s -j DNAT --to %s"
+                 % (floating_ip, fixed_ip))
     _remove_rule("SNATTING", "-t nat -s %s -j SNAT --to %s"
                  % (fixed_ip, floating_ip))
@@ -177,32 +187,77 @@ def ensure_vlan(vlan_num):
         LOG.debug(_("Starting VLAN inteface %s"), interface)
         _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD")
         _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num))
-        _execute("sudo ifconfig %s up" % interface)
+        _execute("sudo ip link set %s up" % interface)
     return interface


 def ensure_bridge(bridge, interface, net_attrs=None):
-    """Create a bridge unless it already exists"""
+    """Create a bridge unless it already exists.
+
+    :param interface: the interface to create the bridge on.
+    :param net_attrs: dictionary with attributes used to create the bridge.
+
+    If net_attrs is set, it will add the net_attrs['gateway'] to the bridge
+    using net_attrs['broadcast'] and net_attrs['cidr'].  It will also add
+    the ip_v6 address specified in net_attrs['cidr_v6'] if use_ipv6 is set.
+
+    The code will attempt to move any ips that already exist on the interface
+    onto the bridge and reset the default gateway if necessary.
+    """
     if not _device_exists(bridge):
         LOG.debug(_("Starting Bridge interface for %s"), interface)
         _execute("sudo brctl addbr %s" % bridge)
         _execute("sudo brctl setfd %s 0" % bridge)
         # _execute("sudo brctl setageing %s 10" % bridge)
         _execute("sudo brctl stp %s off" % bridge)
-    if interface:
-        _execute("sudo brctl addif %s %s" % (bridge, interface))
+        _execute("sudo ip link set %s up" % bridge)
     if net_attrs:
-        _execute("sudo ifconfig %s %s broadcast %s netmask %s up" % \
-                 (bridge,
-                  net_attrs['gateway'],
-                  net_attrs['broadcast'],
-                  net_attrs['netmask']))
+        # NOTE(vish): The ip for dnsmasq has to be the first address on the
+        #             bridge for it to respond to reqests properly
+        suffix = net_attrs['cidr'].rpartition('/')[2]
+        out, err = _execute("sudo ip addr add %s/%s brd %s dev %s" %
+                            (net_attrs['gateway'],
+                             suffix,
+                             net_attrs['broadcast'],
+                             bridge),
+                            check_exit_code=False)
+        if err and err != "RTNETLINK answers: File exists\n":
+            raise exception.Error("Failed to add ip: %s" % err)
         if(FLAGS.use_ipv6):
             _execute("sudo ip -f inet6 addr change %s dev %s" %
                      (net_attrs['cidr_v6'], bridge))
-        _execute("sudo ifconfig %s up" % bridge)
-    else:
-        _execute("sudo ifconfig %s up" % bridge)
+        # NOTE(vish): If the public interface is the same as the
+        #             bridge, then the bridge has to be in promiscuous
+        #             to forward packets properly.
+        if(FLAGS.public_interface == bridge):
+            _execute("sudo ip link set dev %s promisc on" % bridge)
+    if interface:
+        # NOTE(vish): This will break if there is already an ip on the
+        #             interface, so we move any ips to the bridge
+        gateway = None
+        out, err = _execute("sudo route -n")
+        for line in out.split("\n"):
+            fields = line.split()
+            if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
+                gateway = fields[1]
+        out, err = _execute("sudo ip addr show dev %s scope global" %
+                            interface)
+        for line in out.split("\n"):
+            fields = line.split()
+            if fields and fields[0] == "inet":
+                params = ' '.join(fields[1:-1])
+                _execute("sudo ip addr del %s dev %s" % (params, fields[-1]))
+                _execute("sudo ip addr add %s dev %s" % (params, bridge))
+        if gateway:
+            _execute("sudo route add 0.0.0.0 gw %s" % gateway)
+        out, err = _execute("sudo brctl addif %s %s" %
+                            (bridge, interface),
+                            check_exit_code=False)
+
+        if (err and err != "device %s is already a member of a bridge; can't "
+                           "enslave it to bridge %s.\n" % (interface, bridge)):
+            raise exception.Error("Failed to add interface: %s" % err)

     if FLAGS.use_nova_chains:
         (out, err) = _execute("sudo iptables -N nova_forward",
                               check_exit_code=False)
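The heart of the new ensure_bridge logic is parsing `route -n` and `ip addr show` output to find the default gateway and any addresses on the physical interface, so both can be re-applied to the bridge. That parsing can be exercised without root by feeding it captured output (the sample text below is illustrative):

    def find_gateway(route_output, interface):
        """Return the default gateway that points out of `interface`, if any."""
        for line in route_output.split("\n"):
            fields = line.split()
            if fields and fields[0] == "0.0.0.0" and fields[-1] == interface:
                return fields[1]
        return None


    def find_inet_params(ip_addr_output):
        """Yield the argument string of every 'inet' line (address, brd, ...)."""
        for line in ip_addr_output.split("\n"):
            fields = line.split()
            if fields and fields[0] == "inet":
                yield ' '.join(fields[1:-1])


    route_out = ("Destination Gateway Genmask Flags Metric Ref Use Iface\n"
                 "0.0.0.0 192.168.1.1 0.0.0.0 UG 0 0 0 eth0\n")
    ip_out = "    inet 192.168.1.5/24 brd 192.168.1.255 scope global eth0\n"

    assert find_gateway(route_out, "eth0") == "192.168.1.1"
    assert (list(find_inet_params(ip_out))
            == ["192.168.1.5/24 brd 192.168.1.255 scope global"])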
@@ -313,8 +368,9 @@ interface %s
 def _host_dhcp(fixed_ip_ref):
     """Return a host string for an address"""
     instance_ref = fixed_ip_ref['instance']
-    return "%s,%s.novalocal,%s" % (instance_ref['mac_address'],
+    return "%s,%s.%s,%s" % (instance_ref['mac_address'],
                                    instance_ref['hostname'],
+                                   FLAGS.dhcp_domain,
                                    fixed_ip_ref['address'])


@@ -329,7 +385,8 @@ def _execute(cmd, *args, **kwargs):

 def _device_exists(device):
     """Check if ethernet device exists"""
-    (_out, err) = _execute("ifconfig %s" % device, check_exit_code=False)
+    (_out, err) = _execute("ip link show dev %s" % device,
+                           check_exit_code=False)
     return not err


@@ -359,6 +416,7 @@ def _dnsmasq_cmd(net):
            ' --strict-order',
            ' --bind-interfaces',
            ' --conf-file=',
+           ' --domain=%s' % FLAGS.dhcp_domain,
            ' --pid-file=%s' % _dhcp_file(net['bridge'], 'pid'),
            ' --listen-address=%s' % net['gateway'],
            ' --except-interface=lo',
@@ -118,6 +118,10 @@ class NetworkManager(manager.Manager):
         super(NetworkManager, self).__init__(*args, **kwargs)

     def init_host(self):
+        """Do any initialization that needs to be run if this is a
+        standalone service.
+        """
+        self.driver.init_host()
         # Set up networking for the projects for which we're already
         # the designated network host.
         ctxt = context.get_admin_context()

@@ -327,11 +331,12 @@ class FlatManager(NetworkManager):
         pass

     def create_networks(self, context, cidr, num_networks, network_size,
-                        cidr_v6, *args, **kwargs):
+                        cidr_v6, label, *args, **kwargs):
         """Create networks based on parameters."""
         fixed_net = IPy.IP(cidr)
         fixed_net_v6 = IPy.IP(cidr_v6)
         significant_bits_v6 = 64
+        count = 1
         for index in range(num_networks):
             start = index * network_size
             significant_bits = 32 - int(math.log(network_size, 2))

@@ -344,6 +349,11 @@ class FlatManager(NetworkManager):
             net['gateway'] = str(project_net[1])
             net['broadcast'] = str(project_net.broadcast())
             net['dhcp_start'] = str(project_net[2])
+            if num_networks > 1:
+                net['label'] = "%s_%d" % (label, count)
+            else:
+                net['label'] = label
+            count += 1

             if(FLAGS.use_ipv6):
                 cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6)

@@ -395,7 +405,6 @@ class FlatDHCPManager(FlatManager):
         standalone service.
         """
         super(FlatDHCPManager, self).init_host()
-        self.driver.init_host()
         self.driver.metadata_forward()

     def setup_compute_network(self, context, instance_id):

@@ -465,7 +474,6 @@ class VlanManager(NetworkManager):
         standalone service.
         """
         super(VlanManager, self).init_host()
-        self.driver.init_host()
         self.driver.metadata_forward()

     def allocate_fixed_ip(self, context, instance_id, *args, **kwargs):

@@ -503,6 +511,12 @@ class VlanManager(NetworkManager):
     def create_networks(self, context, cidr, num_networks, network_size,
                         cidr_v6, vlan_start, vpn_start):
         """Create networks based on parameters."""
+        # Check that num_networks + vlan_start is not > 4094, fixes lp708025
+        if num_networks + vlan_start > 4094:
+            raise ValueError(_('The sum between the number of networks and'
+                               ' the vlan start cannot be greater'
+                               ' than 4094'))
+
        fixed_net = IPy.IP(cidr)
        fixed_net_v6 = IPy.IP(cidr_v6)
        network_size_v6 = 1 << 64
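The VLAN guard matters because 802.1Q VLAN IDs are 12 bits wide (usable range 1-4094), so creating num_networks networks starting at vlan_start must not run past the top of that space. A tiny standalone check in the same spirit:

    def check_vlan_range(num_networks, vlan_start, max_vlan=4094):
        """Raise if the requested networks overflow the 802.1Q VLAN space."""
        if num_networks + vlan_start > max_vlan:
            raise ValueError('The sum between the number of networks and'
                             ' the vlan start cannot be greater'
                             ' than %d' % max_vlan)


    check_vlan_range(10, 100)        # fine
    try:
        check_vlan_range(100, 4000)  # 4100 > 4094
    except ValueError as exc:
        print(exc)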
15
nova/rpc.py
15
nova/rpc.py
@@ -29,6 +29,7 @@ import uuid

 from carrot import connection as carrot_connection
 from carrot import messaging
+from eventlet import greenpool
 from eventlet import greenthread

 from nova import context
@@ -42,11 +43,13 @@ from nova import utils
 FLAGS = flags.FLAGS
 LOG = logging.getLogger('nova.rpc')

+flags.DEFINE_integer('rpc_thread_pool_size', 1024, 'Size of RPC thread pool')
+

 class Connection(carrot_connection.BrokerConnection):
     """Connection instance object"""
     @classmethod
-    def instance(cls, new=False):
+    def instance(cls, new=True):
         """Returns the instance"""
         if new or not hasattr(cls, '_instance'):
             params = dict(hostname=FLAGS.rabbit_host,
@@ -155,11 +158,15 @@ class AdapterConsumer(TopicConsumer):
     def __init__(self, connection=None, topic="broadcast", proxy=None):
         LOG.debug(_('Initing the Adapter Consumer for %s') % topic)
         self.proxy = proxy
+        self.pool = greenpool.GreenPool(FLAGS.rpc_thread_pool_size)
         super(AdapterConsumer, self).__init__(connection=connection,
                                               topic=topic)

+    def receive(self, *args, **kwargs):
+        self.pool.spawn_n(self._receive, *args, **kwargs)
+
     @exception.wrap_exception
-    def receive(self, message_data, message):
+    def _receive(self, message_data, message):
         """Magically looks for a method on the proxy object and calls it

         Message data should be a dictionary with two keys:
@@ -246,7 +253,7 @@ def msg_reply(msg_id, reply=None, failure=None):
         LOG.error(_("Returning exception %s to caller"), message)
         LOG.error(tb)
         failure = (failure[0].__name__, str(failure[1]), tb)
-    conn = Connection.instance(True)
+    conn = Connection.instance()
     publisher = DirectPublisher(connection=conn, msg_id=msg_id)
     try:
         publisher.send({'result': reply, 'failure': failure})
@@ -319,7 +326,7 @@ def call(context, topic, msg):
             self.result = data['result']

     wait_msg = WaitMessage()
-    conn = Connection.instance(True)
+    conn = Connection.instance()
     consumer = DirectConsumer(connection=conn, msg_id=msg_id)
     consumer.register_callback(wait_msg)

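Two things are worth noting in the rpc.py change. First, `Connection.instance` now defaults to `new=True`, so `msg_reply` and `call` get a fresh broker connection unless a cached one is requested explicitly. Second, the consumer follows a standard eventlet pattern: the public `receive` only schedules `_receive` on a green pool, so one slow proxy method cannot stall the consumer loop. A minimal standalone sketch of that pattern (illustrative names, not nova's API):

    import eventlet
    from eventlet import greenpool

    pool = greenpool.GreenPool(1024)

    def _handle(message):
        eventlet.sleep(0.1)      # stand-in for a slow handler
        print('handled %s' % message)

    def receive(message):
        # returns immediately; _handle runs cooperatively in the pool
        pool.spawn_n(_handle, message)

    for i in range(5):
        receive(i)
    pool.waitall()               # block until in-flight handlers finish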
@@ -92,31 +92,3 @@ class RateLimitingMiddlewareTest(unittest.TestCase):
         self.assertEqual(middleware.limiter.__class__.__name__, "Limiter")
         middleware = RateLimitingMiddleware(simple_wsgi, service_host='foobar')
         self.assertEqual(middleware.limiter.__class__.__name__, "WSGIAppProxy")
-
-
-class LimiterTest(unittest.TestCase):
-
-    def test_limiter(self):
-        items = range(2000)
-        req = Request.blank('/')
-        self.assertEqual(limited(items, req), items[:1000])
-        req = Request.blank('/?offset=0')
-        self.assertEqual(limited(items, req), items[:1000])
-        req = Request.blank('/?offset=3')
-        self.assertEqual(limited(items, req), items[3:1003])
-        req = Request.blank('/?offset=2005')
-        self.assertEqual(limited(items, req), [])
-        req = Request.blank('/?limit=10')
-        self.assertEqual(limited(items, req), items[:10])
-        req = Request.blank('/?limit=0')
-        self.assertEqual(limited(items, req), items[:1000])
-        req = Request.blank('/?limit=3000')
-        self.assertEqual(limited(items, req), items[:1000])
-        req = Request.blank('/?offset=1&limit=3')
-        self.assertEqual(limited(items, req), items[1:4])
-        req = Request.blank('/?offset=3&limit=0')
-        self.assertEqual(limited(items, req), items[3:1003])
-        req = Request.blank('/?offset=3&limit=1500')
-        self.assertEqual(limited(items, req), items[3:1003])
-        req = Request.blank('/?offset=3000&limit=10')
-        self.assertEqual(limited(items, req), [])
161
nova/tests/api/openstack/test_common.py
Normal file
@@ -0,0 +1,161 @@
+# vim: tabstop=4 shiftwidth=4 softtabstop=4
+
+# Copyright 2010 OpenStack LLC.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+"""
+Test suites for 'common' code used throughout the OpenStack HTTP API.
+"""
+
+import unittest
+
+from webob import Request
+
+from nova.api.openstack.common import limited
+
+
+class LimiterTest(unittest.TestCase):
+    """
+    Unit tests for the `nova.api.openstack.common.limited` method which takes
+    in a list of items and, depending on the 'offset' and 'limit' GET params,
+    returns a subset or complete set of the given items.
+    """
+
+    def setUp(self):
+        """
+        Run before each test.
+        """
+        self.tiny = range(1)
+        self.small = range(10)
+        self.medium = range(1000)
+        self.large = range(10000)
+
+    def test_limiter_offset_zero(self):
+        """
+        Test offset key works with 0.
+        """
+        req = Request.blank('/?offset=0')
+        self.assertEqual(limited(self.tiny, req), self.tiny)
+        self.assertEqual(limited(self.small, req), self.small)
+        self.assertEqual(limited(self.medium, req), self.medium)
+        self.assertEqual(limited(self.large, req), self.large[:1000])
+
+    def test_limiter_offset_medium(self):
+        """
+        Test offset key works with a medium sized number.
+        """
+        req = Request.blank('/?offset=10')
+        self.assertEqual(limited(self.tiny, req), [])
+        self.assertEqual(limited(self.small, req), self.small[10:])
+        self.assertEqual(limited(self.medium, req), self.medium[10:])
+        self.assertEqual(limited(self.large, req), self.large[10:1010])
+
+    def test_limiter_offset_over_max(self):
+        """
+        Test offset key works with a number over 1000 (max_limit).
+        """
+        req = Request.blank('/?offset=1001')
+        self.assertEqual(limited(self.tiny, req), [])
+        self.assertEqual(limited(self.small, req), [])
+        self.assertEqual(limited(self.medium, req), [])
+        self.assertEqual(limited(self.large, req), self.large[1001:2001])
+
+    def test_limiter_offset_blank(self):
+        """
+        Test offset key works with a blank offset.
+        """
+        req = Request.blank('/?offset=')
+        self.assertEqual(limited(self.tiny, req), self.tiny)
+        self.assertEqual(limited(self.small, req), self.small)
+        self.assertEqual(limited(self.medium, req), self.medium)
+        self.assertEqual(limited(self.large, req), self.large[:1000])
+
+    def test_limiter_offset_bad(self):
+        """
+        Test offset key works with a BAD offset.
+        """
+        req = Request.blank(u'/?offset=\u0020aa')
+        self.assertEqual(limited(self.tiny, req), self.tiny)
+        self.assertEqual(limited(self.small, req), self.small)
+        self.assertEqual(limited(self.medium, req), self.medium)
+        self.assertEqual(limited(self.large, req), self.large[:1000])
+
+    def test_limiter_nothing(self):
+        """
+        Test request with no offset or limit
+        """
+        req = Request.blank('/')
+        self.assertEqual(limited(self.tiny, req), self.tiny)
+        self.assertEqual(limited(self.small, req), self.small)
+        self.assertEqual(limited(self.medium, req), self.medium)
+        self.assertEqual(limited(self.large, req), self.large[:1000])
+
+    def test_limiter_limit_zero(self):
+        """
+        Test limit of zero.
+        """
+        req = Request.blank('/?limit=0')
+        self.assertEqual(limited(self.tiny, req), self.tiny)
+        self.assertEqual(limited(self.small, req), self.small)
+        self.assertEqual(limited(self.medium, req), self.medium)
+        self.assertEqual(limited(self.large, req), self.large[:1000])
+
+    def test_limiter_limit_medium(self):
+        """
+        Test limit of 10.
+        """
+        req = Request.blank('/?limit=10')
+        self.assertEqual(limited(self.tiny, req), self.tiny)
+        self.assertEqual(limited(self.small, req), self.small)
+        self.assertEqual(limited(self.medium, req), self.medium[:10])
+        self.assertEqual(limited(self.large, req), self.large[:10])
+
+    def test_limiter_limit_over_max(self):
+        """
+        Test limit of 3000.
+        """
+        req = Request.blank('/?limit=3000')
+        self.assertEqual(limited(self.tiny, req), self.tiny)
+        self.assertEqual(limited(self.small, req), self.small)
+        self.assertEqual(limited(self.medium, req), self.medium)
+        self.assertEqual(limited(self.large, req), self.large[:1000])
+
+    def test_limiter_limit_and_offset(self):
+        """
+        Test request with both limit and offset.
+        """
+        items = range(2000)
+        req = Request.blank('/?offset=1&limit=3')
+        self.assertEqual(limited(items, req), items[1:4])
+        req = Request.blank('/?offset=3&limit=0')
+        self.assertEqual(limited(items, req), items[3:1003])
+        req = Request.blank('/?offset=3&limit=1500')
+        self.assertEqual(limited(items, req), items[3:1003])
+        req = Request.blank('/?offset=3000&limit=10')
+        self.assertEqual(limited(items, req), [])
+
+    def test_limiter_custom_max_limit(self):
+        """
+        Test a max_limit other than 1000.
+        """
+        items = range(2000)
+        req = Request.blank('/?offset=1&limit=3')
+        self.assertEqual(limited(items, req, max_limit=2000), items[1:4])
+        req = Request.blank('/?offset=3&limit=0')
+        self.assertEqual(limited(items, req, max_limit=2000), items[3:])
+        req = Request.blank('/?offset=3&limit=2500')
+        self.assertEqual(limited(items, req, max_limit=2000), items[3:])
+        req = Request.blank('/?offset=3000&limit=10')
+        self.assertEqual(limited(items, req, max_limit=2000), [])
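Taken together these tests pin down the paging contract of `limited`: offset and limit come from the query string, malformed or missing values fall back to defaults, and the effective limit is clamped to `max_limit` (1000 unless overridden). A sketch consistent with the tests; the real implementation lives in nova/api/openstack/common.py and may differ in detail:

    def limited(items, request, max_limit=1000):
        """Return a slice of 'items' based on 'offset'/'limit' GET params."""
        try:
            offset = int(request.GET.get('offset', 0))
        except ValueError:
            offset = 0                    # bad offsets are ignored
        try:
            limit = int(request.GET.get('limit', max_limit))
        except ValueError:
            limit = max_limit             # bad limits are ignored
        limit = min(max_limit, limit or max_limit)   # 0 means "no limit given"
        return items[offset:offset + limit]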
@@ -15,6 +15,7 @@
 # License for the specific language governing permissions and limitations
 # under the License.

+import datetime
 import json
 import unittest

@@ -39,6 +40,13 @@ def return_server(context, id):
     return stub_instance(id)


+def return_server_with_addresses(private, public):
+    def _return_server(context, id):
+        return stub_instance(id, private_address=private,
+                             public_addresses=public)
+    return _return_server
+
+
 def return_servers(context, user_id=1):
     return [stub_instance(i, user_id) for i in xrange(5)]

@@ -55,9 +63,45 @@ def instance_address(context, instance_id):
     return None


-def stub_instance(id, user_id=1):
-    return Instance(id=id, state=0, image_id=10, user_id=user_id,
-                    display_name='server%s' % id)
+def stub_instance(id, user_id=1, private_address=None, public_addresses=None):
+    if public_addresses == None:
+        public_addresses = list()
+
+    instance = {
+        "id": id,
+        "admin_pass": "",
+        "user_id": user_id,
+        "project_id": "",
+        "image_id": 10,
+        "kernel_id": "",
+        "ramdisk_id": "",
+        "launch_index": 0,
+        "key_name": "",
+        "key_data": "",
+        "state": 0,
+        "state_description": "",
+        "memory_mb": 0,
+        "vcpus": 0,
+        "local_gb": 0,
+        "hostname": "",
+        "host": "",
+        "instance_type": "",
+        "user_data": "",
+        "reservation_id": "",
+        "mac_address": "",
+        "scheduled_at": datetime.datetime.now(),
+        "launched_at": datetime.datetime.now(),
+        "terminated_at": datetime.datetime.now(),
+        "availability_zone": "",
+        "display_name": "server%s" % id,
+        "display_description": "",
+        "locked": False}
+
+    instance["fixed_ip"] = {
+        "address": private_address,
+        "floating_ips": [{"address":ip} for ip in public_addresses]}
+
+    return instance


 def fake_compute_api(cls, req, id):
@@ -105,6 +149,22 @@ class ServersTest(unittest.TestCase):
         self.assertEqual(res_dict['server']['id'], '1')
         self.assertEqual(res_dict['server']['name'], 'server1')

+    def test_get_server_by_id_with_addresses(self):
+        private = "192.168.0.3"
+        public = ["1.2.3.4"]
+        new_return_server = return_server_with_addresses(private, public)
+        self.stubs.Set(nova.db.api, 'instance_get', new_return_server)
+        req = webob.Request.blank('/v1.0/servers/1')
+        res = req.get_response(fakes.wsgi_app())
+        res_dict = json.loads(res.body)
+        self.assertEqual(res_dict['server']['id'], '1')
+        self.assertEqual(res_dict['server']['name'], 'server1')
+        addresses = res_dict['server']['addresses']
+        self.assertEqual(len(addresses["public"]), len(public))
+        self.assertEqual(addresses["public"][0], public[0])
+        self.assertEqual(len(addresses["private"]), 1)
+        self.assertEqual(addresses["private"][0], private)
+
     def test_get_server_list(self):
         req = webob.Request.blank('/v1.0/servers')
         res = req.get_response(fakes.wsgi_app())
@@ -281,6 +341,18 @@ class ServersTest(unittest.TestCase):
         res = req.get_response(fakes.wsgi_app())
         self.assertEqual(res.status_int, 202)

+    def test_server_reset_network(self):
+        FLAGS.allow_admin_api = True
+        body = dict(server=dict(
+            name='server_test', imageId=2, flavorId=2, metadata={},
+            personality={}))
+        req = webob.Request.blank('/v1.0/servers/1/reset_network')
+        req.method = 'POST'
+        req.content_type = 'application/json'
+        req.body = json.dumps(body)
+        res = req.get_response(fakes.wsgi_app())
+        self.assertEqual(res.status_int, 202)
+
     def test_server_diagnostics(self):
         req = webob.Request.blank("/v1.0/servers/1/diagnostics")
         req.method = "GET"

BIN
nova/tests/db/nova.austin.sqlite
Normal file
Binary file not shown.
@@ -248,16 +248,14 @@ class ApiEc2TestCase(test.TestCase):
         self.mox.ReplayAll()

         rv = self.ec2.get_all_security_groups()
-        # I don't bother checkng that we actually find it here,
-        # because the create/delete unit test further up should
-        # be good enough for that.
-        for group in rv:
-            if group.name == security_group_name:
-                self.assertEquals(len(group.rules), 1)
-                self.assertEquals(int(group.rules[0].from_port), 80)
-                self.assertEquals(int(group.rules[0].to_port), 81)
-                self.assertEquals(len(group.rules[0].grants), 1)
-                self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')
+
+        group = [grp for grp in rv if grp.name == security_group_name][0]
+
+        self.assertEquals(len(group.rules), 1)
+        self.assertEquals(int(group.rules[0].from_port), 80)
+        self.assertEquals(int(group.rules[0].to_port), 81)
+        self.assertEquals(len(group.rules[0].grants), 1)
+        self.assertEquals(str(group.rules[0].grants[0]), '0.0.0.0/0')

         self.expect_http()
         self.mox.ReplayAll()
@@ -314,16 +312,13 @@ class ApiEc2TestCase(test.TestCase):
         self.mox.ReplayAll()

         rv = self.ec2.get_all_security_groups()
-        # I don't bother checkng that we actually find it here,
-        # because the create/delete unit test further up should
-        # be good enough for that.
-        for group in rv:
-            if group.name == security_group_name:
-                self.assertEquals(len(group.rules), 1)
-                self.assertEquals(int(group.rules[0].from_port), 80)
-                self.assertEquals(int(group.rules[0].to_port), 81)
-                self.assertEquals(len(group.rules[0].grants), 1)
-                self.assertEquals(str(group.rules[0].grants[0]), '::/0')
+
+        group = [grp for grp in rv if grp.name == security_group_name][0]
+        self.assertEquals(len(group.rules), 1)
+        self.assertEquals(int(group.rules[0].from_port), 80)
+        self.assertEquals(int(group.rules[0].to_port), 81)
+        self.assertEquals(len(group.rules[0].grants), 1)
+        self.assertEquals(str(group.rules[0].grants[0]), '::/0')

         self.expect_http()
         self.mox.ReplayAll()
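The rewritten check is also stricter than the loop it replaces: `[grp for grp in rv if grp.name == security_group_name][0]` raises IndexError when the group is missing, so the test now fails loudly, whereas the old `for`/`if` version would assert nothing and silently pass.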
@@ -49,7 +49,7 @@ class ComputeTestCase(test.TestCase):
         self.manager = manager.AuthManager()
         self.user = self.manager.create_user('fake', 'fake', 'fake')
         self.project = self.manager.create_project('fake', 'fake', 'fake')
-        self.context = context.get_admin_context()
+        self.context = context.RequestContext('fake', 'fake', False)

     def tearDown(self):
         self.manager.delete_user(self.user)
@@ -69,6 +69,13 @@ class ComputeTestCase(test.TestCase):
         inst['ami_launch_index'] = 0
         return db.instance_create(self.context, inst)['id']

+    def _create_group(self):
+        values = {'name': 'testgroup',
+                  'description': 'testgroup',
+                  'user_id': self.user.id,
+                  'project_id': self.project.id}
+        return db.security_group_create(self.context, values)
+
     def test_create_instance_defaults_display_name(self):
         """Verify that an instance cannot be created without a display_name."""
         cases = [dict(), dict(display_name=None)]
@@ -82,23 +89,55 @@ class ComputeTestCase(test.TestCase):

     def test_create_instance_associates_security_groups(self):
         """Make sure create associates security groups"""
-        values = {'name': 'default',
-                  'description': 'default',
-                  'user_id': self.user.id,
-                  'project_id': self.project.id}
-        group = db.security_group_create(self.context, values)
+        group = self._create_group()
         ref = self.compute_api.create(
             self.context,
             instance_type=FLAGS.default_instance_type,
             image_id=None,
-            security_group=['default'])
+            security_group=['testgroup'])
         try:
             self.assertEqual(len(db.security_group_get_by_instance(
                              self.context, ref[0]['id'])), 1)
+            group = db.security_group_get(self.context, group['id'])
+            self.assert_(len(group.instances) == 1)
         finally:
             db.security_group_destroy(self.context, group['id'])
             db.instance_destroy(self.context, ref[0]['id'])

+    def test_destroy_instance_disassociates_security_groups(self):
+        """Make sure destroying disassociates security groups"""
+        group = self._create_group()
+
+        ref = self.compute_api.create(
+            self.context,
+            instance_type=FLAGS.default_instance_type,
+            image_id=None,
+            security_group=['testgroup'])
+        try:
+            db.instance_destroy(self.context, ref[0]['id'])
+            group = db.security_group_get(self.context, group['id'])
+            self.assert_(len(group.instances) == 0)
+        finally:
+            db.security_group_destroy(self.context, group['id'])
+
+    def test_destroy_security_group_disassociates_instances(self):
+        """Make sure destroying security groups disassociates instances"""
+        group = self._create_group()
+
+        ref = self.compute_api.create(
+            self.context,
+            instance_type=FLAGS.default_instance_type,
+            image_id=None,
+            security_group=['testgroup'])
+
+        try:
+            db.security_group_destroy(self.context, group['id'])
+            group = db.security_group_get(context.get_admin_context(
+                                          read_deleted=True), group['id'])
+            self.assert_(len(group.instances) == 0)
+        finally:
+            db.instance_destroy(self.context, ref[0]['id'])
+
     def test_run_terminate(self):
         """Make sure it is possible to run and terminate instance"""
         instance_id = self._create_instance()
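The admin context with `read_deleted=True` in the last of these tests is needed because nova's DB layer soft-deletes rows: after `security_group_destroy` the group can only be fetched through a context that is allowed to see deleted records.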
@@ -163,6 +202,14 @@ class ComputeTestCase(test.TestCase):
         self.compute.set_admin_password(self.context, instance_id)
         self.compute.terminate_instance(self.context, instance_id)

+    def test_inject_file(self):
+        """Ensure we can write a file to an instance"""
+        instance_id = self._create_instance()
+        self.compute.run_instance(self.context, instance_id)
+        self.compute.inject_file(self.context, instance_id, "/tmp/test",
+                                 "File Contents")
+        self.compute.terminate_instance(self.context, instance_id)
+
     def test_snapshot(self):
         """Ensure instance can be snapshotted"""
         instance_id = self._create_instance()
@@ -46,6 +46,27 @@ class RootLoggerTestCase(test.TestCase):
         self.assert_(True)  # didn't raise exception


+class LogHandlerTestCase(test.TestCase):
+    def test_log_path_logdir(self):
+        self.flags(logdir='/some/path')
+        self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+                          '/some/path/foo-bar.log')
+
+    def test_log_path_logfile(self):
+        self.flags(logfile='/some/path/foo-bar.log')
+        self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+                          '/some/path/foo-bar.log')
+
+    def test_log_path_none(self):
+        self.assertTrue(log.get_log_file_path(binary='foo-bar') is None)
+
+    def test_log_path_logfile_overrides_logdir(self):
+        self.flags(logdir='/some/other/path',
+                   logfile='/some/path/foo-bar.log')
+        self.assertEquals(log.get_log_file_path(binary='foo-bar'),
+                          '/some/path/foo-bar.log')
+
+
 class NovaFormatterTestCase(test.TestCase):
     def setUp(self):
         super(NovaFormatterTestCase, self).setUp()
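The four new tests fully determine the precedence of the two logging flags: `logfile` wins outright, `logdir` synthesizes `<logdir>/<binary>.log`, and with neither set the helper returns None. A sketch of the behavior they pin down (the real helper in nova.log reads FLAGS rather than taking parameters):

    import os

    def get_log_file_path(binary=None, logfile=None, logdir=None):
        if logfile:
            return logfile
        if logdir:
            return os.path.join(logdir, '%s.log' % binary)
        return None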
@@ -32,6 +32,7 @@ from nova.virt import xenapi_conn
 from nova.virt.xenapi import fake as xenapi_fake
 from nova.virt.xenapi import volume_utils
 from nova.virt.xenapi.vmops import SimpleDH
+from nova.virt.xenapi.vmops import VMOps
 from nova.tests.db import fakes as db_fakes
 from nova.tests.xenapi import stubs
 from nova.tests.glance import stubs as glance_stubs
@@ -141,6 +142,10 @@ class XenAPIVolumeTestCase(test.TestCase):
         self.stubs.UnsetAll()


+def reset_network(*args):
+    pass
+
+
 class XenAPIVMTestCase(test.TestCase):
     """
     Unit tests for VM operations
@@ -162,6 +167,7 @@ class XenAPIVMTestCase(test.TestCase):
         stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
         stubs.stubout_get_this_vm_uuid(self.stubs)
         stubs.stubout_stream_disk(self.stubs)
+        self.stubs.Set(VMOps, 'reset_network', reset_network)
         glance_stubs.stubout_glance_client(self.stubs,
                                            glance_stubs.FakeGlance)
         self.conn = xenapi_conn.get_connection(False)
@@ -243,7 +249,8 @@ class XenAPIVMTestCase(test.TestCase):
         # Check that the VM is running according to XenAPI.
         self.assertEquals(vm['power_state'], 'Running')

-    def _test_spawn(self, image_id, kernel_id, ramdisk_id):
+    def _test_spawn(self, image_id, kernel_id, ramdisk_id,
+                    instance_type="m1.large"):
         stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests)
         values = {'name': 1,
                   'id': 1,
@@ -252,7 +259,7 @@ class XenAPIVMTestCase(test.TestCase):
                   'image_id': image_id,
                   'kernel_id': kernel_id,
                   'ramdisk_id': ramdisk_id,
-                  'instance_type': 'm1.large',
+                  'instance_type': instance_type,
                   'mac_address': 'aa:bb:cc:dd:ee:ff',
                   }
         conn = xenapi_conn.get_connection(False)
@@ -260,6 +267,12 @@ class XenAPIVMTestCase(test.TestCase):
         conn.spawn(instance)
         self.check_vm_record(conn)

+    def test_spawn_not_enough_memory(self):
+        FLAGS.xenapi_image_service = 'glance'
+        self.assertRaises(Exception,
+                          self._test_spawn,
+                          1, 2, 3, "m1.xlarge")
+
     def test_spawn_raw_objectstore(self):
         FLAGS.xenapi_image_service = 'objectstore'
         self._test_spawn(1, None, None)
@@ -43,8 +43,6 @@ else:


 FLAGS = flags.FLAGS
-flags.DEFINE_string('logdir', None, 'directory to keep log files in '
-                                    '(will be prepended to $logfile)')


 class TwistdServerOptions(ServerOptions):
@@ -20,13 +20,14 @@
 System-level utilities and helper functions.
 """

+import base64
 import datetime
 import inspect
 import json
 import os
 import random
-import subprocess
 import socket
+import string
 import struct
 import sys
 import time
@@ -36,6 +37,7 @@ import netaddr

 from eventlet import event
 from eventlet import greenthread
+from eventlet.green import subprocess

 from nova import exception
 from nova.exception import ProcessExecutionError
|
|||||||
return result
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def ssh_execute(ssh, cmd, process_input=None,
|
||||||
|
addl_env=None, check_exit_code=True):
|
||||||
|
LOG.debug(_("Running cmd (SSH): %s"), cmd)
|
||||||
|
if addl_env:
|
||||||
|
raise exception.Error("Environment not supported over SSH")
|
||||||
|
|
||||||
|
if process_input:
|
||||||
|
# This is (probably) fixable if we need it...
|
||||||
|
raise exception.Error("process_input not supported over SSH")
|
||||||
|
|
||||||
|
stdin_stream, stdout_stream, stderr_stream = ssh.exec_command(cmd)
|
||||||
|
channel = stdout_stream.channel
|
||||||
|
|
||||||
|
#stdin.write('process_input would go here')
|
||||||
|
#stdin.flush()
|
||||||
|
|
||||||
|
# NOTE(justinsb): This seems suspicious...
|
||||||
|
# ...other SSH clients have buffering issues with this approach
|
||||||
|
stdout = stdout_stream.read()
|
||||||
|
stderr = stderr_stream.read()
|
||||||
|
stdin_stream.close()
|
||||||
|
|
||||||
|
exit_status = channel.recv_exit_status()
|
||||||
|
|
||||||
|
# exit_status == -1 if no exit code was returned
|
||||||
|
if exit_status != -1:
|
||||||
|
LOG.debug(_("Result was %s") % exit_status)
|
||||||
|
if check_exit_code and exit_status != 0:
|
||||||
|
raise exception.ProcessExecutionError(exit_code=exit_status,
|
||||||
|
stdout=stdout,
|
||||||
|
stderr=stderr,
|
||||||
|
cmd=cmd)
|
||||||
|
|
||||||
|
return (stdout, stderr)
|
||||||
|
|
||||||
|
|
||||||
def abspath(s):
|
def abspath(s):
|
||||||
return os.path.join(os.path.dirname(__file__), s)
|
return os.path.join(os.path.dirname(__file__), s)
|
||||||
|
|
||||||
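`ssh_execute` mirrors `execute`'s (stdout, stderr) return contract so callers can swap local and remote execution. Hypothetical usage with paramiko; the host name and key path here are invented for the example:

    import paramiko

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect('san.example.com', username='admin',
                key_filename='/etc/nova/ssh/san_key')
    stdout, stderr = ssh_execute(ssh, 'uname -a')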
@@ -199,6 +237,15 @@ def generate_mac():
     return ':'.join(map(lambda x: "%02x" % x, mac))


+def generate_password(length=20):
+    """Generate a random sequence of letters and digits
+    to be used as a password. Note that this is not intended
+    to represent the ultimate in security.
+    """
+    chrs = string.letters + string.digits
+    return "".join([random.choice(chrs) for i in xrange(length)])
+
+
 def last_octet(address):
     return int(address.split(".")[-1])

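One caveat in `generate_password`: `string.letters` is locale-dependent in Python 2, so `string.ascii_letters` would make the character set deterministic across systems.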
@@ -440,3 +487,15 @@ def dumps(value):

 def loads(s):
     return json.loads(s)
+
+
+def ensure_b64_encoding(val):
+    """Safety method to ensure that values expected to be base64-encoded
+    actually are. If they are, the value is returned unchanged. Otherwise,
+    the encoded value is returned.
+    """
+    try:
+        dummy = base64.decode(val)
+        return val
+    except TypeError:
+        return base64.b64encode(val)
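Note that the stdlib's `base64.decode` reads from and writes to file objects and takes two arguments, so the single-string call above presumably always raises TypeError, which would make this helper re-encode every value, including ones already encoded. A sketch of the probe the docstring seems to intend, using the string-oriented `b64decode` (not the committed code):

    import base64
    import binascii

    def ensure_b64_encoding(val):
        try:
            base64.b64decode(val)   # raises if val is not valid base64
            return val
        except (TypeError, binascii.Error):
            return base64.b64encode(val)

Even then the probe is heuristic: many plain strings (e.g. 'abcd') happen to be valid base64 themselves.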
@@ -21,7 +21,7 @@ except ImportError:
     'revision_id': 'LOCALREVISION',
     'revno': 0}

-NOVA_VERSION = ['2011', '1']
+NOVA_VERSION = ['2011', '2']
 YEAR, COUNT = NOVA_VERSION

 FINAL = False  # This becomes true at Release Candidate time
@@ -152,6 +152,21 @@ class FakeConnection(object):
         """
         pass

+    def inject_file(self, instance, b64_path, b64_contents):
+        """
+        Writes a file on the specified instance.
+
+        The first parameter is an instance of nova.compute.service.Instance,
+        and so the instance is being specified as instance.name. The second
+        parameter is the base64-encoded path to which the file is to be
+        written on the instance; the third is the contents of the file, also
+        base64-encoded.
+
+        The work will be done asynchronously. This function returns a
+        task that allows the caller to detect when it is complete.
+        """
+        pass
+
     def rescue(self, instance):
         """
         Rescue the specified instance.
@@ -286,6 +286,10 @@ class SessionBase(object):
         rec['currently_attached'] = False
         rec['device'] = ''

+    def host_compute_free_memory(self, _1, ref):
+        #Always return 12GB available
+        return 12 * 1024 * 1024 * 1024
+
     def xenapi_request(self, methodname, params):
         if methodname.startswith('login'):
             self._login(methodname, params)
@@ -138,6 +138,16 @@ class VMHelper(HelperBase):
         LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals())
         return vm_ref

+    @classmethod
+    def ensure_free_mem(cls, session, instance):
+        instance_type = instance_types.INSTANCE_TYPES[instance.instance_type]
+        mem = long(instance_type['memory_mb']) * 1024 * 1024
+        #get free memory from host
+        host = session.get_xenapi_host()
+        host_free_mem = long(session.get_xenapi().host.
+                             compute_free_memory(host))
+        return host_free_mem >= mem
+
     @classmethod
     def create_vbd(cls, session, vm_ref, vdi_ref, userdevice, bootable):
         """Create a VBD record.  Returns a Deferred that gives the new
@@ -384,7 +394,7 @@ class VMHelper(HelperBase):
             pv = True
         elif pv_str.lower() == 'false':
             pv = False
-        LOG.debug(_("PV Kernel in VDI:%d"), pv)
+        LOG.debug(_("PV Kernel in VDI:%s"), pv)
         return pv

     @classmethod
@@ -439,6 +449,14 @@ class VMHelper(HelperBase):
         else:
             return None

+    @classmethod
+    def lookup_kernel_ramdisk(cls, session, vm):
+        vm_rec = session.get_xenapi().VM.get_record(vm)
+        if 'PV_kernel' in vm_rec and 'PV_ramdisk' in vm_rec:
+            return (vm_rec['PV_kernel'], vm_rec['PV_ramdisk'])
+        else:
+            return (None, None)
+
     @classmethod
     def compile_info(cls, record):
         """Fill record with VM status information"""
@@ -67,13 +67,19 @@ class VMOps(object):
             raise exception.Duplicate(_('Attempted to create'
                                         ' non-unique name %s') % instance.name)

-        bridge = db.network_get_by_instance(context.get_admin_context(),
-                                            instance['id'])['bridge']
-        network_ref = \
-            NetworkHelper.find_network_with_bridge(self._session, bridge)
+        #ensure enough free memory is available
+        if not VMHelper.ensure_free_mem(self._session, instance):
+            name = instance['name']
+            LOG.exception(_('instance %(name)s: not enough free memory')
+                          % locals())
+            db.instance_set_state(context.get_admin_context(),
+                                  instance['id'],
+                                  power_state.SHUTDOWN)
+            return

         user = AuthManager().get_user(instance.user_id)
         project = AuthManager().get_project(instance.project_id)

         #if kernel is not present we must download a raw disk
         if instance.kernel_id:
             disk_image_type = ImageType.DISK
@@ -99,16 +105,70 @@ class VMOps(object):
             instance, kernel, ramdisk, pv_kernel)
         VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True)

-        if network_ref:
-            VMHelper.create_vif(self._session, vm_ref,
-                                network_ref, instance.mac_address)
+        # write network info
+        admin_context = context.get_admin_context()
+
+        # TODO(tr3buchet) - remove comment in multi-nic
+        # I've decided to go ahead and consider multiple IPs and networks
+        # at this stage even though they aren't implemented because these will
+        # be needed for multi-nic and there was no sense writing it for single
+        # network/single IP and then having to turn around and re-write it
+        IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id'])
+        for network in db.network_get_all_by_instance(admin_context,
+                                                      instance['id']):
+            network_IPs = [ip for ip in IPs if ip.network_id == network.id]
+
+            def ip_dict(ip):
+                return {'netmask': network['netmask'],
+                        'enabled': '1',
+                        'ip': ip.address}
+
+            mac_id = instance.mac_address.replace(':', '')
+            location = 'vm-data/networking/%s' % mac_id
+            mapping = {'label': network['label'],
+                       'gateway': network['gateway'],
+                       'mac': instance.mac_address,
+                       'dns': [network['dns']],
+                       'ips': [ip_dict(ip) for ip in network_IPs]}
+            self.write_to_param_xenstore(vm_ref, {location: mapping})
+
+            # TODO(tr3buchet) - remove comment in multi-nic
+            # this bit here about creating the vifs will be updated
+            # in multi-nic to handle multiple IPs on the same network
+            # and multiple networks
+            # for now it works as there is only one of each
+            bridge = network['bridge']
+            network_ref = \
+                NetworkHelper.find_network_with_bridge(self._session, bridge)
+
+            if network_ref:
+                VMHelper.create_vif(self._session, vm_ref,
+                                    network_ref, instance.mac_address)

         LOG.debug(_('Starting VM %s...'), vm_ref)
         self._session.call_xenapi('VM.start', vm_ref, False, False)
         instance_name = instance.name
         LOG.info(_('Spawning VM %(instance_name)s created %(vm_ref)s.')
                  % locals())

+        def _inject_onset_files():
+            onset_files = instance.onset_files
+            if onset_files:
+                # Check if this is a JSON-encoded string and convert if needed.
+                if isinstance(onset_files, basestring):
+                    try:
+                        onset_files = json.loads(onset_files)
+                    except ValueError:
+                        LOG.exception(_("Invalid value for onset_files: '%s'")
+                                      % onset_files)
+                        onset_files = []
+                # Inject any files, if specified
+                for path, contents in instance.onset_files:
+                    LOG.debug(_("Injecting file path: '%s'") % path)
+                    self.inject_file(instance, path, contents)
+
         # NOTE(armando): Do we really need to do this in virt?
+        # NOTE(tr3buchet): not sure but wherever we do it, we need to call
+        # reset_network afterwards
         timer = utils.LoopingCall(f=None)

         def _wait_for_boot():
@@ -119,6 +179,8 @@ class VMOps(object):
                 if state == power_state.RUNNING:
                     LOG.debug(_('Instance %s: booted'), instance['name'])
                     timer.stop()
+                    _inject_onset_files()
+                    return True
             except Exception, exc:
                 LOG.warn(exc)
                 LOG.exception(_('instance %s: failed to boot'),
@@ -127,8 +189,13 @@ class VMOps(object):
                                       instance['id'],
                                       power_state.SHUTDOWN)
                 timer.stop()
+                return False

         timer.f = _wait_for_boot
+
+        # call reset networking
+        self.reset_network(instance)
+
         return timer.start(interval=0.5, now=True)

     def _get_vm_opaque_ref(self, instance_or_vm):
@@ -161,7 +228,8 @@ class VMOps(object):
             instance_name = instance_or_vm.name
         vm = VMHelper.lookup(self._session, instance_name)
         if vm is None:
-            raise Exception(_('Instance not present %s') % instance_name)
+            raise exception.NotFound(
+                            _('Instance not present %s') % instance_name)
         return vm

     def snapshot(self, instance, image_id):
@@ -255,6 +323,32 @@ class VMOps(object):
             raise RuntimeError(resp_dict['message'])
         return resp_dict['message']

+    def inject_file(self, instance, b64_path, b64_contents):
+        """Write a file to the VM instance. The path to which it is to be
+        written and the contents of the file need to be supplied; both should
+        be base64-encoded to prevent errors with non-ASCII characters being
+        transmitted. If the agent does not support file injection, or the user
+        has disabled it, a NotImplementedError will be raised.
+        """
+        # Files/paths *should* be base64-encoded at this point, but
+        # double-check to make sure.
+        b64_path = utils.ensure_b64_encoding(b64_path)
+        b64_contents = utils.ensure_b64_encoding(b64_contents)
+
+        # Need to uniquely identify this request.
+        transaction_id = str(uuid.uuid4())
+        args = {'id': transaction_id, 'b64_path': b64_path,
+                'b64_contents': b64_contents}
+        # If the agent doesn't support file injection, a NotImplementedError
+        # will be raised with the appropriate message.
+        resp = self._make_agent_call('inject_file', instance, '', args)
+        resp_dict = json.loads(resp)
+        if resp_dict['returncode'] != '0':
+            # There was some other sort of error; the message will contain
+            # a description of the error.
+            raise RuntimeError(resp_dict['message'])
+        return resp_dict['message']
+
     def _shutdown(self, instance, vm):
         """Shutdown an instance """
         state = self.get_info(instance['name'])['state']
@@ -286,8 +380,23 @@ class VMOps(object):
     def _destroy_vm(self, instance, vm):
         """Destroys a VM record """
         try:
-            task = self._session.call_xenapi('Async.VM.destroy', vm)
-            self._session.wait_for_task(instance.id, task)
+            kernel = None
+            ramdisk = None
+            if instance.kernel_id or instance.ramdisk_id:
+                (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(
+                                    self._session, vm)
+            task1 = self._session.call_xenapi('Async.VM.destroy', vm)
+            LOG.debug(_("Removing kernel/ramdisk files"))
+            fn = "remove_kernel_ramdisk"
+            args = {}
+            if kernel:
+                args['kernel-file'] = kernel
+            if ramdisk:
+                args['ramdisk-file'] = ramdisk
+            task2 = self._session.async_call_plugin('glance', fn, args)
+            self._session.wait_for_task(instance.id, task1)
+            self._session.wait_for_task(instance.id, task2)
+            LOG.debug(_("kernel/ramdisk files removed"))
         except self.XenAPI.Failure, exc:
             LOG.exception(exc)

@@ -374,6 +483,14 @@ class VMOps(object):
         # TODO: implement this!
         return 'http://fakeajaxconsole/fake_url'

+    def reset_network(self, instance):
+        """
+        Creates uuid arg to pass to make_agent_call and calls it.
+
+        """
+        args = {'id': str(uuid.uuid4())}
+        resp = self._make_agent_call('resetnetwork', instance, '', args)
+
     def list_from_xenstore(self, vm, path):
         """Runs the xenstore-ls command to get a listing of all records
         from 'path' downward. Returns a dict with the sub-paths as keys,
@@ -443,6 +560,11 @@ class VMOps(object):
         if 'TIMEOUT:' in err_msg:
             LOG.error(_('TIMEOUT: The call to %(method)s timed out. '
                         'VM id=%(instance_id)s; args=%(strargs)s') % locals())
+        elif 'NOT IMPLEMENTED:' in err_msg:
+            LOG.error(_('NOT IMPLEMENTED: The call to %(method)s is not'
+                        ' supported by the agent. VM id=%(instance_id)s;'
+                        ' args=%(strargs)s') % locals())
+            raise NotImplementedError(err_msg)
         else:
             LOG.error(_('The call to %(method)s returned an error: %(e)s. '
                         'VM id=%(instance_id)s; args=%(strargs)s') % locals())
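Note the ordering in the rewritten `_destroy_vm`: both XenAPI tasks are started before either is waited on, so the VM destroy and the glance plugin's kernel/ramdisk cleanup can presumably proceed concurrently rather than back to back.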
@@ -168,6 +168,12 @@ class XenAPIConnection(object):
         """Set the root/admin password on the VM instance"""
         self._vmops.set_admin_password(instance, new_pass)

+    def inject_file(self, instance, b64_path, b64_contents):
+        """Create a file on the VM instance. The file path and contents
+        should be base64-encoded.
+        """
+        self._vmops.inject_file(instance, b64_path, b64_contents)
+
     def destroy(self, instance):
         """Destroy VM instance"""
         self._vmops.destroy(instance)
@@ -188,6 +194,10 @@ class XenAPIConnection(object):
         """resume the specified instance"""
         self._vmops.resume(instance, callback)

+    def reset_network(self, instance):
+        """reset networking for specified instance"""
+        self._vmops.reset_network(instance)
+
     def get_info(self, instance_id):
         """Return data about VM instance"""
         return self._vmops.get_info(instance_id)
@@ -45,7 +45,7 @@ class API(base.Base):
             LOG.warn(_("Quota exceeeded for %(pid)s, tried to create"
                        " %(size)sG volume") % locals())
             raise quota.QuotaError(_("Volume quota exceeded. You cannot "
-                                     "create a volume of size %s") % size)
+                                     "create a volume of size %sG") % size)

         options = {
             'size': size,
@@ -294,8 +294,10 @@ class ISCSIDriver(VolumeDriver):
             self._execute("sudo ietadm --op delete --tid=%s" %
                           iscsi_target)

-    def _get_name_and_portal(self, volume_name, host):
+    def _get_name_and_portal(self, volume):
         """Gets iscsi name and portal from volume name and host."""
+        volume_name = volume['name']
+        host = volume['host']
         (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
                                     "sendtargets -p %s" % host)
         for target in out.splitlines():
@@ -307,8 +309,7 @@ class ISCSIDriver(VolumeDriver):

     def discover_volume(self, volume):
         """Discover volume on a remote host."""
-        iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
-                                                             volume['host'])
+        iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
         self._execute("sudo iscsiadm -m node -T %s -p %s --login" %
                       (iscsi_name, iscsi_portal))
         self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
@@ -319,8 +320,7 @@ class ISCSIDriver(VolumeDriver):

     def undiscover_volume(self, volume):
         """Undiscover volume on a remote host."""
-        iscsi_name, iscsi_portal = self._get_name_and_portal(volume['name'],
-                                                             volume['host'])
+        iscsi_name, iscsi_portal = self._get_name_and_portal(volume)
         self._execute("sudo iscsiadm -m node -T %s -p %s --op update "
                       "-n node.startup -v manual" %
                       (iscsi_name, iscsi_portal))
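The portal parsing in `_get_name_and_portal` (and its SAN override below) assumes `iscsiadm -m discovery` prints one record per line as `<ip>:<port>,<tpgt> <target-iqn>`. A quick illustration with a made-up record:

    line = "192.168.0.10:3260,1 iqn.2010-10.org.openstack:volume-00000001"
    location, _sep, iscsi_name = line.partition(" ")
    iscsi_portal = location.split(",")[0]
    # location     == "192.168.0.10:3260,1"
    # iscsi_name   == "iqn.2010-10.org.openstack:volume-00000001"
    # iscsi_portal == "192.168.0.10:3260"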
@@ -87,7 +87,7 @@ class VolumeManager(manager.Manager):
             if volume['status'] in ['available', 'in-use']:
                 self.driver.ensure_export(ctxt, volume)
             else:
-                LOG.info(_("volume %s: skipping export"), volume_ref['name'])
+                LOG.info(_("volume %s: skipping export"), volume['name'])

     def create_volume(self, context, volume_id):
         """Creates and exports the volume."""
@@ -111,10 +111,10 @@ class VolumeManager(manager.Manager):

             LOG.debug(_("volume %s: creating export"), volume_ref['name'])
             self.driver.create_export(context, volume_ref)
-        except Exception as e:
+        except Exception:
             self.db.volume_update(context,
                                   volume_ref['id'], {'status': 'error'})
-            raise e
+            raise

         now = datetime.datetime.utcnow()
         self.db.volume_update(context,
@@ -137,11 +137,11 @@ class VolumeManager(manager.Manager):
             self.driver.remove_export(context, volume_ref)
             LOG.debug(_("volume %s: deleting"), volume_ref['name'])
             self.driver.delete_volume(volume_ref)
-        except Exception as e:
+        except Exception:
             self.db.volume_update(context,
                                   volume_ref['id'],
                                   {'status': 'error_deleting'})
-            raise e
+            raise

         self.db.volume_destroy(context, volume_id)
         LOG.debug(_("volume %s: deleted successfully"), volume_ref['name'])
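The switch from `raise e` to a bare `raise` matters in Python 2: re-raising the exception by name resets its traceback to the current frame, while a bare `raise` re-raises the active exception with the original traceback intact. A standalone demonstration:

    import traceback

    def inner():
        raise ValueError('boom')

    try:
        try:
            inner()
        except Exception:
            raise              # original traceback still points into inner()
    except Exception:
        traceback.print_exc()  # shows inner() as the origin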
335
nova/volume/san.py
Normal file
@ -0,0 +1,335 @@
# vim: tabstop=4 shiftwidth=4 softtabstop=4

# Copyright 2011 Justin Santa Barbara
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Drivers for san-stored volumes.
The unique thing about a SAN is that we don't expect that we can run the volume
controller on the SAN hardware. We expect to access it over SSH or some API.
"""

import os
import paramiko

from nova import exception
from nova import flags
from nova import log as logging
from nova.utils import ssh_execute
from nova.volume.driver import ISCSIDriver

LOG = logging.getLogger("nova.volume.driver")
FLAGS = flags.FLAGS
flags.DEFINE_boolean('san_thin_provision', 'true',
                     'Use thin provisioning for SAN volumes?')
flags.DEFINE_string('san_ip', '',
                    'IP address of SAN controller')
flags.DEFINE_string('san_login', 'admin',
                    'Username for SAN controller')
flags.DEFINE_string('san_password', '',
                    'Password for SAN controller')
flags.DEFINE_string('san_privatekey', '',
                    'Filename of private key to use for SSH authentication')

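For orientation, these definitions follow the python-gflags pattern that nova's flags module wraps. A standalone gflags sketch, with a hypothetical address, of how a value set on the command line becomes an attribute:

    import gflags

    FLAGS = gflags.FLAGS
    gflags.DEFINE_string('san_ip', '', 'IP address of SAN controller')

    # Parse a fake argv; afterwards the value is readable as an attribute.
    FLAGS(['nova-volume', '--san_ip=192.168.0.10'])
    print(FLAGS.san_ip)  # 192.168.0.10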

class SanISCSIDriver(ISCSIDriver):
    """ Base class for SAN-style storage volumes
    (storage providers we access over SSH)"""
    #Override because SAN ip != host ip
    def _get_name_and_portal(self, volume):
        """Gets iscsi name and portal from volume name and host."""
        volume_name = volume['name']

        # TODO(justinsb): store in volume, remerge with generic iSCSI code
        host = FLAGS.san_ip

        (out, _err) = self._execute("sudo iscsiadm -m discovery -t "
                                    "sendtargets -p %s" % host)

        location = None
        find_iscsi_name = self._build_iscsi_target_name(volume)
        for target in out.splitlines():
            if find_iscsi_name in target:
                (location, _sep, iscsi_name) = target.partition(" ")
                break
        if not location:
            raise exception.Error(_("Could not find iSCSI export "
                                    " for volume %s") %
                                  volume_name)

        iscsi_portal = location.split(",")[0]
        LOG.debug("iscsi_name=%s, iscsi_portal=%s" %
                  (iscsi_name, iscsi_portal))
        return (iscsi_name, iscsi_portal)

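To make the parsing above concrete: `iscsiadm -m discovery -t sendtargets` prints one `portal,tpgt target-iqn` pair per line, so `partition(" ")` splits off the portal half from the iqn. A standalone sketch with made-up values:

    out = "10.0.0.5:3260,1 iqn.2010-10.org.openstack:volume-00000001\n"
    find_iscsi_name = "volume-00000001"

    location = None
    for target in out.splitlines():
        if find_iscsi_name in target:
            (location, _sep, iscsi_name) = target.partition(" ")
            break

    iscsi_portal = location.split(",")[0]
    print(iscsi_name)    # iqn.2010-10.org.openstack:volume-00000001
    print(iscsi_portal)  # 10.0.0.5:3260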
    def _build_iscsi_target_name(self, volume):
        return "%s%s" % (FLAGS.iscsi_target_prefix, volume['name'])

    # discover_volume is still OK
    # undiscover_volume is still OK

    def _connect_to_ssh(self):
        ssh = paramiko.SSHClient()
        #TODO(justinsb): We need a better SSH key policy
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        if FLAGS.san_password:
            ssh.connect(FLAGS.san_ip,
                        username=FLAGS.san_login,
                        password=FLAGS.san_password)
        elif FLAGS.san_privatekey:
            privatekeyfile = os.path.expanduser(FLAGS.san_privatekey)
            # It sucks that paramiko doesn't support DSA keys
            privatekey = paramiko.RSAKey.from_private_key_file(privatekeyfile)
            ssh.connect(FLAGS.san_ip,
                        username=FLAGS.san_login,
                        pkey=privatekey)
        else:
            raise exception.Error("Specify san_password or san_privatekey")
        return ssh

    def _run_ssh(self, command, check_exit_code=True):
        #TODO(justinsb): SSH connection caching (?)
        ssh = self._connect_to_ssh()

        #TODO(justinsb): Reintroduce the retry hack
        ret = ssh_execute(ssh, command, check_exit_code=check_exit_code)

        ssh.close()

        return ret

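Outside nova, the same round-trip looks like this with plain paramiko (host, credentials and command are hypothetical):

    import paramiko

    ssh = paramiko.SSHClient()
    ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    ssh.connect('192.168.0.10', username='admin', password='secret')

    # exec_command returns the three channel file objects
    stdin, stdout, stderr = ssh.exec_command('pfexec /usr/sbin/stmfadm list-tg')
    print(stdout.read())

    ssh.close()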
    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        pass

    def create_export(self, context, volume):
        """Exports the volume."""
        pass

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""
        pass

    def check_for_setup_error(self):
        """Raises an error if prerequisites aren't met"""
        if not (FLAGS.san_password or FLAGS.san_privatekey):
            raise exception.Error("Specify san_password or san_privatekey")

        if not (FLAGS.san_ip):
            raise exception.Error("san_ip must be set")


def _collect_lines(data):
    """ Split lines from data into an array, trimming them """
    matches = []
    for line in data.splitlines():
        match = line.strip()
        matches.append(match)

    return matches


def _get_prefixed_values(data, prefix):
    """Collect lines which start with prefix; with trimming"""
    matches = []
    for line in data.splitlines():
        line = line.strip()
        if line.startswith(prefix):
            match = line[len(prefix):]
            match = match.strip()
            matches.append(match)

    return matches

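A quick illustration of the two helpers, run alongside the definitions above, on stmfadm-style output (values hypothetical):

    data = ("Target group: tg-volume-00000001\n"
            "    Member: iqn.2010-10.org.openstack:volume-00000001\n")

    print(_get_prefixed_values(data, 'Target group: '))
    # ['tg-volume-00000001']
    print(_collect_lines(data))
    # ['Target group: tg-volume-00000001',
    #  'Member: iqn.2010-10.org.openstack:volume-00000001']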

class SolarisISCSIDriver(SanISCSIDriver):
    """Executes commands relating to Solaris-hosted ISCSI volumes.
    Basic setup for a Solaris iSCSI server:
    pkg install storage-server SUNWiscsit
    svcadm enable stmf
    svcadm enable -r svc:/network/iscsi/target:default
    pfexec itadm create-tpg e1000g0 ${MYIP}
    pfexec itadm create-target -t e1000g0

    Then grant the user that will be logging on lots of permissions.
    I'm not sure exactly which though:
    zfs allow justinsb create,mount,destroy rpool
    usermod -P'File System Management' justinsb
    usermod -P'Primary Administrator' justinsb

    Also make sure you can log in using san_login & san_password/san_privatekey
    """

    def _view_exists(self, luid):
        cmd = "pfexec /usr/sbin/stmfadm list-view -l %s" % (luid)
        (out, _err) = self._run_ssh(cmd,
                                    check_exit_code=False)
        if "no views found" in out:
            return False

        if "View Entry:" in out:
            return True

        raise exception.Error("Cannot parse list-view output: %s" % (out))

    def _get_target_groups(self):
        """Gets list of target groups from host."""
        (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg")
        matches = _get_prefixed_values(out, 'Target group: ')
        LOG.debug("target_groups=%s" % matches)
        return matches

    def _target_group_exists(self, target_group_name):
        return target_group_name in self._get_target_groups()

    def _get_target_group_members(self, target_group_name):
        (out, _err) = self._run_ssh("pfexec /usr/sbin/stmfadm list-tg -v %s" %
                                    (target_group_name))
        matches = _get_prefixed_values(out, 'Member: ')
        LOG.debug("members of %s=%s" % (target_group_name, matches))
        return matches

    def _is_target_group_member(self, target_group_name, iscsi_target_name):
        return iscsi_target_name in (
            self._get_target_group_members(target_group_name))

    def _get_iscsi_targets(self):
        cmd = ("pfexec /usr/sbin/itadm list-target | "
               "awk '{print $1}' | grep -v ^TARGET")
        (out, _err) = self._run_ssh(cmd)
        matches = _collect_lines(out)
        LOG.debug("_get_iscsi_targets=%s" % (matches))
        return matches

    def _iscsi_target_exists(self, iscsi_target_name):
        return iscsi_target_name in self._get_iscsi_targets()

    def _build_zfs_poolname(self, volume):
        #TODO(justinsb): rpool should be configurable
        zfs_poolname = 'rpool/%s' % (volume['name'])
        return zfs_poolname

    def create_volume(self, volume):
        """Creates a volume."""
        if int(volume['size']) == 0:
            sizestr = '100M'
        else:
            sizestr = '%sG' % volume['size']

        zfs_poolname = self._build_zfs_poolname(volume)

        thin_provision_arg = '-s' if FLAGS.san_thin_provision else ''
        # Create a zfs volume
        self._run_ssh("pfexec /usr/sbin/zfs create %s -V %s %s" %
                      (thin_provision_arg,
                       sizestr,
                       zfs_poolname))

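Worked through for a hypothetical volume, the command create_volume assembles:

    volume = {'name': 'volume-00000001', 'size': '1'}

    sizestr = '100M' if int(volume['size']) == 0 else '%sG' % volume['size']
    thin_provision_arg = '-s'  # assuming san_thin_provision is enabled
    zfs_poolname = 'rpool/%s' % volume['name']

    print("pfexec /usr/sbin/zfs create %s -V %s %s" %
          (thin_provision_arg, sizestr, zfs_poolname))
    # pfexec /usr/sbin/zfs create -s -V 1G rpool/volume-00000001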
    def _get_luid(self, volume):
        zfs_poolname = self._build_zfs_poolname(volume)

        cmd = ("pfexec /usr/sbin/sbdadm list-lu | "
               "grep -w %s | awk '{print $1}'" %
               (zfs_poolname))

        (stdout, _stderr) = self._run_ssh(cmd)

        luid = stdout.strip()
        return luid

    def _is_lu_created(self, volume):
        luid = self._get_luid(volume)
        return luid

    def delete_volume(self, volume):
        """Deletes a volume."""
        zfs_poolname = self._build_zfs_poolname(volume)
        self._run_ssh("pfexec /usr/sbin/zfs destroy %s" %
                      (zfs_poolname))

    def local_path(self, volume):
        # TODO(justinsb): Is this needed here?
        escaped_group = FLAGS.volume_group.replace('-', '--')
        escaped_name = volume['name'].replace('-', '--')
        return "/dev/mapper/%s-%s" % (escaped_group, escaped_name)

    def ensure_export(self, context, volume):
        """Synchronously recreates an export for a logical volume."""
        #TODO(justinsb): On bootup, this is called for every volume.
        # It then runs ~5 SSH commands for each volume,
        # most of which fetch the same info each time
        # This makes initial start stupid-slow
        self._do_export(volume, force_create=False)

    def create_export(self, context, volume):
        self._do_export(volume, force_create=True)

    def _do_export(self, volume, force_create):
        # Create a Logical Unit (LU) backed by the zfs volume
        zfs_poolname = self._build_zfs_poolname(volume)

        if force_create or not self._is_lu_created(volume):
            cmd = ("pfexec /usr/sbin/sbdadm create-lu /dev/zvol/rdsk/%s" %
                   (zfs_poolname))
            self._run_ssh(cmd)

        luid = self._get_luid(volume)
        iscsi_name = self._build_iscsi_target_name(volume)
        target_group_name = 'tg-%s' % volume['name']

        # Create an iSCSI target, mapped to just this volume
        if force_create or not self._target_group_exists(target_group_name):
            self._run_ssh("pfexec /usr/sbin/stmfadm create-tg %s" %
                          (target_group_name))

        # Yes, we add the initiator before we create it!
        # Otherwise, it complains that the target is already active
        if force_create or not self._is_target_group_member(target_group_name,
                                                            iscsi_name):
            self._run_ssh("pfexec /usr/sbin/stmfadm add-tg-member -g %s %s" %
                          (target_group_name, iscsi_name))
        if force_create or not self._iscsi_target_exists(iscsi_name):
            self._run_ssh("pfexec /usr/sbin/itadm create-target -n %s" %
                          (iscsi_name))
        if force_create or not self._view_exists(luid):
            self._run_ssh("pfexec /usr/sbin/stmfadm add-view -t %s %s" %
                          (target_group_name, luid))

    def remove_export(self, context, volume):
        """Removes an export for a logical volume."""

        # This is the reverse of _do_export
        luid = self._get_luid(volume)
        iscsi_name = self._build_iscsi_target_name(volume)
        target_group_name = 'tg-%s' % volume['name']

        if self._view_exists(luid):
            self._run_ssh("pfexec /usr/sbin/stmfadm remove-view -l %s -a" %
                          (luid))

        if self._iscsi_target_exists(iscsi_name):
            self._run_ssh("pfexec /usr/sbin/stmfadm offline-target %s" %
                          (iscsi_name))
            self._run_ssh("pfexec /usr/sbin/itadm delete-target %s" %
                          (iscsi_name))

        # We don't delete the tg-member; we delete the whole tg!

        if self._target_group_exists(target_group_name):
            self._run_ssh("pfexec /usr/sbin/stmfadm delete-tg %s" %
                          (target_group_name))

        if self._is_lu_created(volume):
            self._run_ssh("pfexec /usr/sbin/sbdadm delete-lu %s" %
                          (luid))
@ -73,8 +73,8 @@ def key_init(self, arg_dict):
@jsonify
def password(self, arg_dict):
    """Writes a request to xenstore that tells the agent to set
    the root password for the given VM. The password should be
    encrypted using the shared secret key that was returned by a
    previous call to key_init. The encrypted password value should
    be passed as the value for the 'enc_pass' key in arg_dict.
    """
@ -91,6 +91,17 @@ def password(self, arg_dict):
    return resp


@jsonify
def resetnetwork(self, arg_dict):
    """Writes a request to xenstore that tells the agent
    to reset networking.
    """
    arg_dict['value'] = json.dumps({'name': 'resetnetwork', 'value': ''})
    request_id = arg_dict['id']
    arg_dict['path'] = "data/host/%s" % request_id
    xenstore.write_record(self, arg_dict)


def _wait_for_agent(self, request_id, arg_dict):
    """Periodically checks xenstore for a response from the agent.
    The request is always written to 'data/host/{id}', and
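Concretely, for a hypothetical request id of 42, the new resetnetwork handler leaves this record at the xenstore path for the agent to pick up:

    import json

    arg_dict = {'id': '42'}
    arg_dict['value'] = json.dumps({'name': 'resetnetwork', 'value': ''})
    arg_dict['path'] = "data/host/%s" % arg_dict['id']

    print(arg_dict['path'])   # data/host/42
    print(arg_dict['value'])  # {"name": "resetnetwork", "value": ""}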
@ -108,7 +119,8 @@ def _wait_for_agent(self, request_id, arg_dict):
    # First, delete the request record
    arg_dict["path"] = "data/host/%s" % request_id
    xenstore.delete_record(self, arg_dict)
    raise TimeoutError("TIMEOUT: No response from agent within %s seconds." %
    raise TimeoutError(
        "TIMEOUT: No response from agent within %s seconds." %
        AGENT_TIMEOUT)
    ret = xenstore.read_record(self, arg_dict)
    # Note: the response for None will be a string that includes
@ -123,4 +135,5 @@ def _wait_for_agent(self, request_id, arg_dict):
if __name__ == "__main__":
    XenAPIPlugin.dispatch(
        {"key_init": key_init,
         "password": password})
         "password": password,
         "resetnetwork": resetnetwork})
@ -43,32 +43,47 @@ CHUNK_SIZE = 8192
KERNEL_DIR = '/boot/guest'
FILE_SR_PATH = '/var/run/sr-mount'

def copy_kernel_vdi(session,args):

def remove_kernel_ramdisk(session, args):
    """Removes kernel and/or ramdisk from dom0's file system"""
    kernel_file = exists(args, 'kernel-file')
    ramdisk_file = exists(args, 'ramdisk-file')
    if kernel_file:
        os.remove(kernel_file)
    if ramdisk_file:
        os.remove(ramdisk_file)
    return "ok"


def copy_kernel_vdi(session, args):
    vdi = exists(args, 'vdi-ref')
    size = exists(args,'image-size')
    size = exists(args, 'image-size')
    #Use the uuid as a filename
    vdi_uuid=session.xenapi.VDI.get_uuid(vdi)
    vdi_uuid = session.xenapi.VDI.get_uuid(vdi)
    copy_args={'vdi_uuid':vdi_uuid,'vdi_size':int(size)}
    copy_args = {'vdi_uuid': vdi_uuid, 'vdi_size': int(size)}
    filename=with_vdi_in_dom0(session, vdi, False,
    filename = with_vdi_in_dom0(session, vdi, False,
                              lambda dev:
                              _copy_kernel_vdi('/dev/%s' % dev,copy_args))
                              _copy_kernel_vdi('/dev/%s' % dev, copy_args))
    return filename


def _copy_kernel_vdi(dest,copy_args):
def _copy_kernel_vdi(dest, copy_args):
    vdi_uuid=copy_args['vdi_uuid']
    vdi_uuid = copy_args['vdi_uuid']
    vdi_size=copy_args['vdi_size']
    vdi_size = copy_args['vdi_size']
    logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",dest,vdi_uuid)
    logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",
                  dest, vdi_uuid)
    filename=KERNEL_DIR + '/' + vdi_uuid
    filename = KERNEL_DIR + '/' + vdi_uuid
    #read data from /dev/ and write into a file on /boot/guest
    of=open(filename,'wb')
    of = open(filename, 'wb')
    f=open(dest,'rb')
    f = open(dest, 'rb')
    #copy only vdi_size bytes
    data=f.read(vdi_size)
    data = f.read(vdi_size)
    of.write(data)
    f.close()
    of.close()
    logging.debug("Done. Filename: %s",filename)
    logging.debug("Done. Filename: %s", filename)
    return filename


def put_vdis(session, args):
    params = pickle.loads(exists(args, 'params'))
@ -76,22 +91,23 @@ def put_vdis(session, args):
    image_id = params["image_id"]
    glance_host = params["glance_host"]
    glance_port = params["glance_port"]

    sr_path = get_sr_path(session)
    #FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs
    tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id))
    tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path]
    paths = [ "%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids ]
    paths = ["%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids]
    tar_cmd.extend(paths)
    logging.debug("Bundling image with cmd: %s", tar_cmd)
    subprocess.call(tar_cmd)
    logging.debug("Writing to test file %s", tmp_file)
    put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port)
    return "" # FIXME(sirp): return anything useful here?
    # FIXME(sirp): return anything useful here?
    return ""


def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port):
    size = os.path.getsize(tmp_file)
    basename = os.path.basename(tmp_file)

    bundle = open(tmp_file, 'r')
@ -112,12 +128,11 @@ def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port):
    for header, value in headers.iteritems():
        conn.putheader(header, value)
    conn.endheaders()

    chunk = bundle.read(CHUNK_SIZE)
    while chunk:
        conn.send(chunk)
        chunk = bundle.read(CHUNK_SIZE)

    res = conn.getresponse()
    #FIXME(sirp): should this be 201 Created?
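The read/send loop above is the usual pre-chunked-encoding streaming pattern: read a fixed-size piece, send it, repeat until read returns an empty string. A self-contained sketch with an in-memory file standing in for the bundle:

    from StringIO import StringIO

    CHUNK_SIZE = 8192
    bundle = StringIO('x' * 20000)   # stands in for the open tarball

    sent = 0
    chunk = bundle.read(CHUNK_SIZE)
    while chunk:
        sent += len(chunk)           # stands in for conn.send(chunk)
        chunk = bundle.read(CHUNK_SIZE)

    print(sent)  # 20000, sent in 8192 + 8192 + 3616 byte pieces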
@ -126,6 +141,7 @@ def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port):
    finally:
        bundle.close()


def get_sr_path(session):
    sr_ref = find_sr(session)

@ -156,5 +172,6 @@ def find_sr(session):


if __name__ == '__main__':
    XenAPIPlugin.dispatch({'put_vdis': put_vdis,
                           'copy_kernel_vdi': copy_kernel_vdi})
                           'copy_kernel_vdi': copy_kernel_vdi,
                           'remove_kernel_ramdisk': remove_kernel_ramdisk})
@ -43,34 +43,37 @@ SECTOR_SIZE = 512
MBR_SIZE_SECTORS = 63
MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE

def is_vdi_pv(session,args):

def is_vdi_pv(session, args):
    logging.debug("Checking whether VDI has PV kernel")
    vdi = exists(args, 'vdi-ref')
    pv=with_vdi_in_dom0(session, vdi, False,
    pv = with_vdi_in_dom0(session, vdi, False,
                        lambda dev: _is_vdi_pv('/dev/%s' % dev))
    if pv:
        return 'true'
    else:
        return 'false'


def _is_vdi_pv(dest):
    logging.debug("Running pygrub against %s",dest)
    logging.debug("Running pygrub against %s", dest)
    output=os.popen('pygrub -qn %s' % dest)
    output = os.popen('pygrub -qn %s' % dest)
    pv=False
    pv = False
    for line in output.readlines():
        #try to find kernel string
        m=re.search('(?<=kernel:)/.*(?:>)',line)
        m = re.search('(?<=kernel:)/.*(?:>)', line)
        if m:
            if m.group(0).find('xen')!=-1:
            if m.group(0).find('xen') != -1:
                pv=True
                pv = True
    logging.debug("PV:%d",pv)
    logging.debug("PV:%d", pv)
    return pv

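The lookbehind regex in _is_vdi_pv expects pygrub output shaped like `kernel:/path>`; a standalone check with a made-up kernel path:

    import re

    line = "kernel:/boot/vmlinuz-2.6.32-5-xen-amd64>"
    m = re.search('(?<=kernel:)/.*(?:>)', line)
    if m:
        print(m.group(0))                    # /boot/vmlinuz-2.6.32-5-xen-amd64>
        print(m.group(0).find('xen') != -1)  # True, so the VDI counts as PV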
def get_vdi(session, args):
    src_url = exists(args, 'src_url')
    username = exists(args, 'username')
    password = exists(args, 'password')
    raw_image=validate_bool(args, 'raw', 'false')
    raw_image = validate_bool(args, 'raw', 'false')
    add_partition = validate_bool(args, 'add_partition', 'false')
    (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)
    sr = find_sr(session)
@ -88,16 +91,17 @@ def get_vdi(session, args):
    vdi = create_vdi(session, sr, src_url, vdi_size, False)
    with_vdi_in_dom0(session, vdi, False,
                     lambda dev: get_vdi_(proto, netloc, url_path,
                                          username, password, add_partition,raw_image,
                                          username, password,
                                          add_partition, raw_image,
                                          virtual_size, '/dev/%s' % dev))
    return session.xenapi.VDI.get_uuid(vdi)


def get_vdi_(proto, netloc, url_path, username, password, add_partition,raw_image,
             virtual_size, dest):
def get_vdi_(proto, netloc, url_path, username, password,
             add_partition, raw_image, virtual_size, dest):

    #Salvatore: vdi should not be partitioned for raw images
    #vdi should not be partitioned for raw images
    if (add_partition and not raw_image):
    if add_partition and not raw_image:
        write_partition(virtual_size, dest)

    offset = (add_partition and not raw_image and MBR_SIZE_BYTES) or 0
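The offset line uses the old Python and/or conditional idiom; an equivalent, clearer spelling (values hypothetical):

    MBR_SIZE_BYTES = 63 * 512
    add_partition, raw_image = True, False

    # the idiom from the hunk above...
    offset = (add_partition and not raw_image and MBR_SIZE_BYTES) or 0
    # ...is the same as a conditional expression:
    assert offset == (MBR_SIZE_BYTES if (add_partition and not raw_image) else 0)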
@ -144,7 +148,7 @@ def get_kernel(session, args):
    password = exists(args, 'password')

    (proto, netloc, url_path, _, _, _) = urlparse.urlparse(src_url)

    dest = os.path.join(KERNEL_DIR, url_path[1:])

    # Paranoid check against people using ../ to do rude things.
@ -154,8 +158,8 @@ def get_kernel(session, args):
    dirname = os.path.dirname(dest)
    try:
        os.makedirs(dirname)
    except os.error, e:
        if e.errno != errno.EEXIST:
            raise
    if not os.path.isdir(dirname):
        raise Exception('Cannot make directory %s', dirname)
@ -248,5 +252,5 @@ def download_all(response, length, dest_file, offset):

if __name__ == '__main__':
    XenAPIPlugin.dispatch({'get_vdi': get_vdi,
                           'get_kernel': get_kernel,
                           'is_vdi_pv': is_vdi_pv})
@ -36,7 +36,15 @@ pluginlib.configure_logging("xenstore")

def jsonify(fnc):
    def wrapper(*args, **kwargs):
        return json.dumps(fnc(*args, **kwargs))
        ret = fnc(*args, **kwargs)
        try:
            json.loads(ret)
        except ValueError:
            # Value should already be JSON-encoded, but some operations
            # may write raw string values; this will catch those and
            # properly encode them.
            ret = json.dumps(ret)
        return ret
    return wrapper
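A minimal standalone check of the new behaviour: already-JSON values pass through untouched, raw strings get encoded instead of double-encoded:

    import json

    def jsonify(fnc):
        def wrapper(*args, **kwargs):
            ret = fnc(*args, **kwargs)
            try:
                json.loads(ret)
            except ValueError:
                ret = json.dumps(ret)
            return ret
        return wrapper

    @jsonify
    def already_encoded():
        return json.dumps({'ok': True})

    @jsonify
    def raw_string():
        return "ok"

    print(already_encoded())  # {"ok": true} -- not wrapped a second time
    print(raw_string())       # "ok"         -- now valid JSON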
@ -26,6 +26,8 @@ from nose import config
from nose import result
from nose import core

from nova import log as logging


class NovaTestResult(result.TextTestResult):
    def __init__(self, *args, **kw):

@ -58,6 +60,7 @@ class NovaTestRunner(core.TextTestRunner):


if __name__ == '__main__':
    logging.basicConfig()
    c = config.Config(stream=sys.stdout,
                      env=os.environ,
                      verbosity=3,
@ -65,10 +65,15 @@ then
if [ "x$use_ve" = "xY" -o "x$use_ve" = "x" -o "x$use_ve" = "xy" ]; then
  # Install the virtualenv and run the test suite in it
  python tools/install_venv.py
  wrapper=${with_venv}
fi
fi
fi
fi

run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py bin/* nova setup.py || exit 1
if [ -z "$noseargs" ];
then
  run_tests && pep8 --repeat --show-pep8 --show-source --exclude=vcsversion.py bin/* nova setup.py || exit 1
else
  run_tests
fi
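In effect, a bare ./run_tests.sh run still finishes with the pep8 style check, while a run with nose arguments passed through (for example, selecting a single test module) now skips pep8 and just runs the selected tests.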
7
setup.py
@ -85,9 +85,13 @@ setup(name='nova',
      packages=find_packages(exclude=['bin', 'smoketests']),
      include_package_data=True,
      test_suite='nose.collector',
      scripts=['bin/nova-api',
      scripts=['bin/nova-ajax-console-proxy',
               'bin/nova-api',
               'bin/nova-combined',
               'bin/nova-compute',
               'bin/nova-console',
               'bin/nova-dhcpbridge',
               'bin/nova-direct-api',
               'bin/nova-import-canonical-imagestore',
               'bin/nova-instancemonitor',
               'bin/nova-logspool',
@ -96,5 +100,6 @@ setup(name='nova',
               'bin/nova-objectstore',
               'bin/nova-scheduler',
               'bin/nova-spoolsentry',
               'bin/stack',
               'bin/nova-volume',
               'tools/nova-debug'])