From 7c8096384507908a5e583f4554d0fc765ae5f2eb Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 27 Jan 2011 20:39:33 +0900 Subject: [PATCH 01/76] adding testcode --- bin/nova-api | 2 + nova/compute/manager.py | 66 ++-- nova/db/sqlalchemy/api.py | 49 +-- nova/db/sqlalchemy/models.py | 12 +- nova/scheduler/driver.py | 53 ++- nova/scheduler/manager.py | 28 +- nova/tests/test_compute.py | 305 +++++++++++++++ nova/tests/test_scheduler.py | 722 +++++++++++++++++++++++++++++++++++ nova/tests/test_service.py | 59 +++ nova/tests/test_virt.py | 520 ++++++++++++++++++++++++- nova/virt/fake.py | 12 +- nova/virt/libvirt_conn.py | 153 ++++---- nova/virt/xenapi_conn.py | 14 +- nova/volume/manager.py | 2 +- 14 files changed, 1805 insertions(+), 192 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 7b4fbeab1402..fba09889f413 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -37,11 +37,13 @@ gettext.install('nova', unicode=1) from nova import flags from nova import log as logging from nova import wsgi +from nova import utils logging.basicConfig() LOG = logging.getLogger('nova.api') LOG.setLevel(logging.DEBUG) +utils.default_flagfile() FLAGS = flags.FLAGS API_ENDPOINTS = ['ec2', 'osapi'] diff --git a/nova/compute/manager.py b/nova/compute/manager.py index efb5753aaa95..4acba71530d5 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -125,12 +125,12 @@ class ComputeManager(manager.Manager): """Insert compute node specific information to DB.""" try: - service_ref = db.service_get_by_args(ctxt, - host, - binary) + service_ref = self.db.service_get_by_args(ctxt, + host, + binary) except exception.NotFound: - msg = _(("""Cannot insert compute manager specific info""" - """Because no service record found.""")) + msg = _(("""Cannot insert compute manager specific info,""" + """ Because no service record found.""")) raise exception.Invalid(msg) # Updating host information @@ -141,14 +141,14 @@ class ComputeManager(manager.Manager): version = self.driver.get_hypervisor_version() cpu_info = self.driver.get_cpu_info() - db.service_update(ctxt, - service_ref['id'], - {'vcpus': vcpu, - 'memory_mb': memory_mb, - 'local_gb': local_gb, - 'hypervisor_type': hypervisor, - 'hypervisor_version': version, - 'cpu_info': cpu_info}) + self.db.service_update(ctxt, + service_ref['id'], + {'vcpus': vcpu, + 'memory_mb': memory_mb, + 'local_gb': local_gb, + 'hypervisor_type': hypervisor, + 'hypervisor_version': version, + 'cpu_info': cpu_info}) def _update_state(self, context, instance_id): """Update the state of an instance from the driver info.""" @@ -596,22 +596,22 @@ class ComputeManager(manager.Manager): """ Check the host cpu is compatible to a cpu given by xml.""" return self.driver.compare_cpu(cpu_info) - def pre_live_migration(self, context, instance_id, dest): + def pre_live_migration(self, context, instance_id): """Any preparation for live migration at dst host.""" # Getting instance info - instance_ref = db.instance_get(context, instance_id) + instance_ref = self.db.instance_get(context, instance_id) ec2_id = instance_ref['hostname'] # Getting fixed ips - fixed_ip = db.instance_get_fixed_address(context, instance_id) + fixed_ip = self.db.instance_get_fixed_address(context, instance_id) if not fixed_ip: msg = _('%s(%s) doesnt have fixed_ip') % (instance_id, ec2_id) raise exception.NotFound(msg) # If any volume is mounted, prepare here. 
         if len(instance_ref['volumes']) == 0:
-            logging.info(_("%s has no volume.") % ec2_id)
+            LOG.info(_("%s has no volume."), ec2_id)
         else:
             for v in instance_ref['volumes']:
                 self.volume_manager.setup_compute_volume(context, v['id'])
@@ -634,7 +634,7 @@ class ComputeManager(manager.Manager):
         """executes live migration."""
 
         # Get instance for error handling.
-        instance_ref = db.instance_get(context, instance_id)
+        instance_ref = self.db.instance_get(context, instance_id)
         ec2_id = instance_ref['hostname']
 
         try:
@@ -647,27 +647,27 @@ class ComputeManager(manager.Manager):
                       "args": {'instance_id': instance_id}})
 
             # Asking dest host to preparing live migration.
-            compute_topic = db.queue_get_for(context,
-                                             FLAGS.compute_topic,
-                                             dest)
+            compute_topic = self.db.queue_get_for(context,
+                                                  FLAGS.compute_topic,
+                                                  dest)
             rpc.call(context,
-                     compute_topic,
-                     {"method": "pre_live_migration",
-                      "args": {'instance_id': instance_id,
-                               'dest': dest}})
+                     compute_topic,
+                     {"method": "pre_live_migration",
+                      "args": {'instance_id': instance_id}})
 
         except Exception, e:
             msg = _('Pre live migration for %s failed at %s')
-            logging.error(msg, ec2_id, dest)
-            db.instance_set_state(context,
-                                  instance_id,
-                                  power_state.RUNNING,
-                                  'running')
+            LOG.error(msg, ec2_id, dest)
+            self.db.instance_set_state(context,
+                                       instance_id,
+                                       power_state.RUNNING,
+                                       'running')
             for v in instance_ref['volumes']:
-                db.volume_update(context,
-                                 v['id'],
-                                 {'status': 'in-use'})
+                self.db.volume_update(context,
+                                      v['id'],
+                                      {'status': 'in-use'})
             # e should be raised. just calling "raise" may raise NotFound.
             raise e
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 248a46f65297..1cdd5a286071 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -897,41 +897,42 @@ def instance_get_all_by_host(context, hostname):
 
 
 @require_context
-def _instance_get_sum_by_host_and_project(context, column, hostname, proj_id):
+def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
     session = get_session()
-
     result = session.query(models.Instance).\
-                     filter_by(host=hostname).\
-                     filter_by(project_id=proj_id).\
-                     filter_by(deleted=can_read_deleted(context)).\
-                     value(column)
-    if not result:
+                      filter_by(host=hostname).\
+                      filter_by(project_id=proj_id).\
+                      filter_by(deleted=False).\
+                      value(func.sum(models.Instance.vcpus))
+    if result is None:
         return 0
     return result
 
 
-@require_context
-def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id):
-    return _instance_get_sum_by_host_and_project(context,
-                                                 'vcpus',
-                                                 hostname,
-                                                 proj_id)
-
-
 @require_context
 def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id):
-    return _instance_get_sum_by_host_and_project(context,
-                                                 'memory_mb',
-                                                 hostname,
-                                                 proj_id)
-
+    session = get_session()
+    result = session.query(models.Instance).\
+                      filter_by(host=hostname).\
+                      filter_by(project_id=proj_id).\
+                      filter_by(deleted=False).\
+                      value(func.sum(models.Instance.memory_mb))
+    if result is None:
+        return 0
+    return result
+
 
 @require_context
 def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id):
-    return _instance_get_sum_by_host_and_project(context,
-                                                 'local_gb',
-                                                 hostname,
-                                                 proj_id)
+    session = get_session()
+    result = session.query(models.Instance).\
+                      filter_by(host=hostname).\
+                      filter_by(project_id=proj_id).\
+                      filter_by(deleted=False).\
+                      value(func.sum(models.Instance.local_gb))
+    if result is None:
+        return 0
+    return result
+
 
 @require_context
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index b28c64b592f9..7c40d55968a0 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -161,11 +161,11 @@ class Service(BASE, NovaBase): # The below items are compute node only. # -1 or None is inserted for other service. - vcpus = Column(Integer, nullable=False, default=-1) - memory_mb = Column(Integer, nullable=False, default=-1) - local_gb = Column(Integer, nullable=False, default=-1) - hypervisor_type = Column(String(128)) - hypervisor_version = Column(Integer, nullable=False, default=-1) + vcpus = Column(Integer, nullable=True) + memory_mb = Column(Integer, nullable=True) + local_gb = Column(Integer, nullable=True) + hypervisor_type = Column(String(128), nullable=True) + hypervisor_version = Column(Integer, nullable=True) # Note(masumotok): Expected Strings example: # # '{"arch":"x86_64", "model":"Nehalem", @@ -174,7 +174,7 @@ class Service(BASE, NovaBase): # # Points are "json translatable" and it must have all # dictionary keys above. - cpu_info = Column(String(512)) + cpu_info = Column(Text(), nullable=True) class Certificate(BASE, NovaBase): diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 65745093b56b..d4ad4238859d 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -69,11 +69,10 @@ class Scheduler(object): raise NotImplementedError(_("Must implement a fallback schedule")) def schedule_live_migration(self, context, instance_id, dest): - """ live migration method """ + """live migration method""" # Whether instance exists and running instance_ref = db.instance_get(context, instance_id) - ec2_id = instance_ref['hostname'] # Checking instance. self._live_migration_src_check(context, instance_ref) @@ -159,48 +158,45 @@ class Scheduler(object): def _live_migration_common_check(self, context, instance_ref, dest): """ - Live migration check routine. - Below pre-checkings are followed by - http://wiki.libvirt.org/page/TodoPreMigrationChecks + Live migration check routine. + Below pre-checkings are followed by + http://wiki.libvirt.org/page/TodoPreMigrationChecks """ # Checking dest exists. dservice_refs = db.service_get_all_by_host(context, dest) if len(dservice_refs) <= 0: - msg = _('%s does not exists.') - raise exception.Invalid(msg % dest) + raise exception.Invalid(_('%s does not exists.') % dest) dservice_ref = dservice_refs[0] # Checking original host( where instance was launched at) exists. - orighost = instance_ref['launched_on'] - oservice_refs = db.service_get_all_by_host(context, orighost) + oservice_refs = db.service_get_all_by_host(context, + instance_ref['launched_on']) if len(oservice_refs) <= 0: msg = _('%s(where instance was launched at) does not exists.') - raise exception.Invalid(msg % orighost) + raise exception.Invalid(msg % instance_ref['launched_on']) oservice_ref = oservice_refs[0] # Checking hypervisor is same. - otype = oservice_ref['hypervisor_type'] - dtype = dservice_ref['hypervisor_type'] - if otype != dtype: + if oservice_ref['hypervisor_type'] != dservice_ref['hypervisor_type']: msg = _('Different hypervisor type(%s->%s)') - raise exception.Invalid(msg % (otype, dtype)) + raise exception.Invalid(msg % (oservice_ref['hypervisor_type'], + dservice_ref['hypervisor_type'])) # Checkng hypervisor version. 
- oversion = oservice_ref['hypervisor_version'] - dversion = dservice_ref['hypervisor_version'] - if oversion > dversion: + if oservice_ref['hypervisor_version'] > \ + dservice_ref['hypervisor_version']: msg = _('Older hypervisor version(%s->%s)') - raise exception.Invalid(msg % (oversion, dversion)) + raise exception.Invalid(msg % (oservice_ref['hypervisor_version'], + dservice_ref['hypervisor_version'])) # Checking cpuinfo. - cpu_info = oservice_ref['cpu_info'] try: rpc.call(context, db.queue_get_for(context, FLAGS.compute_topic, dest), {"method": 'compare_cpu', - "args": {'cpu_info': cpu_info}}) + "args": {'cpu_info': oservice_ref['cpu_info']}}) except rpc.RemoteError, e: msg = _(("""%s doesnt have compatibility to %s""" @@ -211,7 +207,7 @@ class Scheduler(object): raise e def has_enough_resource(self, context, instance_ref, dest): - """ Check if destination host has enough resource for live migration""" + """Check if destination host has enough resource for live migration""" # Getting instance information ec2_id = instance_ref['hostname'] @@ -222,28 +218,27 @@ class Scheduler(object): # Gettin host information service_refs = db.service_get_all_by_host(context, dest) if len(service_refs) <= 0: - msg = _('%s does not exists.') - raise exception.Invalid(msg % dest) + raise exception.Invalid(_('%s does not exists.') % dest) service_ref = service_refs[0] total_cpu = int(service_ref['vcpus']) total_mem = int(service_ref['memory_mb']) total_hdd = int(service_ref['local_gb']) - instances_ref = db.instance_get_all_by_host(context, dest) - for i_ref in instances_ref: + instances_refs = db.instance_get_all_by_host(context, dest) + for i_ref in instances_refs: total_cpu -= int(i_ref['vcpus']) total_mem -= int(i_ref['memory_mb']) total_hdd -= int(i_ref['local_gb']) # Checking host has enough information - logging.debug('host(%s) remains vcpu:%s mem:%s hdd:%s,' % + logging.debug(_('host(%s) remains vcpu:%s mem:%s hdd:%s,') % (dest, total_cpu, total_mem, total_hdd)) - logging.debug('instance(%s) has vcpu:%s mem:%s hdd:%s,' % + logging.debug(_('instance(%s) has vcpu:%s mem:%s hdd:%s,') % (ec2_id, vcpus, mem, hdd)) if total_cpu <= vcpus or total_mem <= mem or total_hdd <= hdd: - msg = '%s doesnt have enough resource for %s' % (dest, ec2_id) - raise exception.NotEmpty(msg) + raise exception.NotEmpty(_('%s is not capable to migrate %s') % + (dest, ec2_id)) logging.debug(_('%s has_enough_resource() for %s') % (dest, ec2_id)) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 1cc767a03025..a181225a6798 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -73,17 +73,13 @@ class SchedulerManager(manager.Manager): # Based on bear design summit discussion, # just put this here for bexar release. 
     def show_host_resource(self, context, host, *args):
-        """ show the physical/usage resource given by hosts."""
+        """Show physical/usage resources of the given host."""
 
-        services = db.service_get_all_by_host(context, host)
-        if len(services) == 0:
-            return {'ret': False, 'msg': 'No such Host'}
-
-        compute = [s for s in services if s['topic'] == 'compute']
-        if 0 == len(compute):
-            service_ref = services[0]
-        else:
-            service_ref = compute[0]
+        computes = db.service_get_all_compute_sorted(context)
+        computes = [s for s, v in computes if s['host'] == host]
+        if len(computes) == 0:
+            return {'ret': False, 'msg': 'No such Host or not compute node.'}
+        service_ref = computes[0]
 
         # Getting physical resource information
         h_resource = {'vcpus': service_ref['vcpus'],
@@ -92,13 +88,15 @@
 
         # Getting usage resource information
         u_resource = {}
-        instances_ref = db.instance_get_all_by_host(context,
-                                                    service_ref['host'])
+        instances_refs = db.instance_get_all_by_host(context,
+                                                     service_ref['host'])
 
-        if 0 == len(instances_ref):
-            return {'ret': True, 'phy_resource': h_resource, 'usage': {}}
+        if len(instances_refs) == 0:
+            return {'ret': True,
+                    'phy_resource': h_resource,
+                    'usage': u_resource}
 
-        project_ids = [i['project_id'] for i in instances_ref]
+        project_ids = [i['project_id'] for i in instances_refs]
         project_ids = list(set(project_ids))
         for p_id in project_ids:
             vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 09f6ee94a055..344c2d2b5c39 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -20,6 +20,7 @@ Tests For Compute
 """
 
 import datetime
+import mox
 
 from nova import compute
 from nova import context
@@ -27,9 +28,12 @@ from nova import db
 from nova import exception
 from nova import flags
 from nova import log as logging
+from nova import rpc
 from nova import test
 from nova import utils
 from nova.auth import manager
+from nova.compute import manager as compute_manager
+from nova.compute import power_state
 
 LOG = logging.getLogger('nova.tests.compute')
@@ -219,3 +223,304 @@ class ComputeTestCase(test.TestCase):
         self.assertEqual(ret_val, None)
 
         self.compute.terminate_instance(self.context, instance_id)
+
+    def test_update_service_exception(self):
+        """nova-compute updates the Service table in the DB as below:
+        nova.service.Service.start ->
+        nova.compute.ComputeManager.update_service.
+        This testcase confirms that an exception is raised when no
+        record is found in the Service table.
+        """
+        host = 'foo'
+        binary = 'nova-compute'
+        dbmock = self.mox.CreateMock(db)
+        dbmock.service_get_by_args(mox.IgnoreArg(),
+                                   mox.StrContains(host),
+                                   mox.StrContains(binary)).\
+                                   AndRaise(exception.NotFound())
+        self.compute.db = dbmock
+        self.mox.ReplayAll()
+        try:
+            self.compute.update_service('dummy', host, binary)
+            self.fail('update_service() must raise exception.Invalid')
+        except exception.Invalid, e:
+            msg = 'Cannot insert compute manager specific info'
+            self.assertTrue(0 <= e.message.find(msg))
+        self.mox.ResetAll()
+
+    def test_update_service_success(self):
+        """nova-compute updates the Service table in the DB as below:
+        nova.service.Service.start ->
+        nova.compute.ComputeManager.update_service.
+        In this method, vcpus/memory_mb/local_gb/hypervisor_type/
+        hypervisor_version/cpu_info should be changed.
+        Based on this specification, this testcase confirms
+        that this method finishes successfully,
+        meaning self.db.service_update is called with a dictionary
+
+        {'vcpu':aaa, 'memory_mb':bbb, 'local_gb':ccc,
+         'hypervisor_type':ddd, 'hypervisor_version':eee,
+         'cpu_info':fff}
+
+        Since each value of the dict above is obtained through the
+        driver (and differs depending on the environment),
+        only the dictionary keys are checked.
+        """
+
+        def dic_key_check(dic):
+            validkey = ['vcpus', 'memory_mb', 'local_gb',
+                        'hypervisor_type', 'hypervisor_version', 'cpu_info']
+            return set(validkey) == set(dic.keys())
+
+        host = 'foo'
+        binary = 'nova-compute'
+        service_ref = {'id':1, 'binary':'nova-compute', 'topic':'compute'}
+        dbmock = self.mox.CreateMock(db)
+        dbmock.service_get_by_args(mox.IgnoreArg(),
+                                   mox.StrContains(host),
+                                   mox.StrContains(binary)).\
+                                   AndReturn(service_ref)
+        dbmock.service_update(mox.IgnoreArg(),
+                              service_ref['id'],
+                              mox.Func(dic_key_check))
+
+        self.compute.db = dbmock
+        self.mox.ReplayAll()
+        self.compute.update_service('dummy', host, binary)
+        self.mox.ResetAll()
+
+    def _setup_other_managers(self):
+        self.volume_manager = utils.import_object(FLAGS.volume_manager)
+        self.network_manager = utils.import_object(FLAGS.network_manager)
+        self.compute_driver = utils.import_object(FLAGS.compute_driver)
+
+    def test_pre_live_migration_instance_has_no_fixed_ip(self):
+        """
+        If an instance to be migrated has no fixed_ip
+        (which does not usually happen), pre_live_migration
+        has to raise an exception.
+        """
+        instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
+                      'hostname':'i-000000001'}
+        c = context.get_admin_context()
+        i_id = instance_ref['id']
+
+        dbmock = self.mox.CreateMock(db)
+        dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+        dbmock.instance_get_fixed_address(c, i_id).AndReturn(None)
+
+        self.compute.db = dbmock
+        self.mox.ReplayAll()
+        self.assertRaises(exception.NotFound,
+                          self.compute.pre_live_migration,
+                          c, instance_ref['id'])
+        self.mox.ResetAll()
+
+    def test_pre_live_migration_instance_has_volume(self):
+        """If any volumes are attached to the instance that is
+        intended to be migrated, setup_compute_volume must be
+        called, because the aoe module should be inserted at the destination
+        host. This testcase checks on it.
+ """ + instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], + 'hostname':'i-000000001'} + c = context.get_admin_context() + i_id=instance_ref['id'] + + self._setup_other_managers() + dbmock = self.mox.CreateMock(db) + volmock = self.mox.CreateMock(self.volume_manager) + netmock = self.mox.CreateMock(self.network_manager) + drivermock = self.mox.CreateMock(self.compute_driver) + + dbmock.instance_get(c, i_id).AndReturn(instance_ref) + dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy') + for i in range(len(instance_ref['volumes'])): + vid = instance_ref['volumes'][i]['id'] + volmock.setup_compute_volume(c, vid).InAnyOrder('g1') + netmock.setup_compute_network(c, instance_ref['id']) + drivermock.ensure_filtering_rules_for_instance(instance_ref) + + self.compute.db = dbmock + self.compute.volume_manager = volmock + self.compute.network_manager = netmock + self.compute.driver = drivermock + + self.mox.ReplayAll() + ret = self.compute.pre_live_migration(c, i_id) + self.assertEqual(ret, None) + self.mox.ResetAll() + + def test_pre_live_migration_instance_has_no_volume(self): + """if any volumes are not attached to the instances that are + intended to be migrated, log message should be appears + because administrator can proove instance conditions before + live_migration if any trouble occurs. + """ + instance_ref={'id':1, 'volumes':[], 'hostname':'i-20000001'} + c = context.get_admin_context() + i_id = instance_ref['id'] + + self._setup_other_managers() + dbmock = self.mox.CreateMock(db) + netmock = self.mox.CreateMock(self.network_manager) + drivermock = self.mox.CreateMock(self.compute_driver) + + dbmock.instance_get(c, i_id).AndReturn(instance_ref) + dbmock.instance_get_fixed_address(c, i_id).AndReturn('dummy') + self.mox.StubOutWithMock(compute_manager.LOG, 'info') + compute_manager.LOG.info(_("%s has no volume."), instance_ref['hostname']) + netmock.setup_compute_network(c, i_id) + drivermock.ensure_filtering_rules_for_instance(instance_ref) + + self.compute.db = dbmock + self.compute.network_manager = netmock + self.compute.driver = drivermock + + self.mox.ReplayAll() + ret = self.compute.pre_live_migration(c, i_id) + self.assertEqual(ret, None) + self.mox.ResetAll() + + def test_live_migration_instance_has_volume(self): + """Any volumes are mounted by instances to be migrated are found, + vblade health must be checked before starting live-migration. + And that is checked by check_for_export(). + This testcase confirms check_for_export() is called. 
+ """ + instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], 'hostname':'i-00000001'} + c = context.get_admin_context() + dest='dummydest' + i_id = instance_ref['id'] + + self._setup_other_managers() + dbmock = self.mox.CreateMock(db) + drivermock = self.mox.CreateMock(self.compute_driver) + + dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref) + self.mox.StubOutWithMock(rpc, 'call') + rpc.call(c, FLAGS.volume_topic, + {"method": "check_for_export", + "args": {'instance_id': i_id}}).InAnyOrder('g1') + rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest), + {"method": "pre_live_migration", + "args": {'instance_id': i_id}}).InAnyOrder('g1') + + self.compute.db = dbmock + self.compute.driver = drivermock + self.mox.ReplayAll() + ret = self.compute.live_migration(c, i_id, dest) + self.assertEqual(ret, None) + self.mox.ResetAll() + + def test_live_migration_instance_has_volume_and_exception(self): + """In addition to test_live_migration_instance_has_volume testcase, + this testcase confirms if any exception raises from check_for_export(). + Then, valid seaquence of this method should recovering instance/volumes + status(ex. instance['state_description'] is changed from 'migrating' + -> 'running', was changed by scheduler) + """ + instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}], + 'hostname':'i-000000001'} + dest='dummydest' + c = context.get_admin_context() + i_id = instance_ref['id'] + + self._setup_other_managers() + dbmock = self.mox.CreateMock(db) + drivermock = self.mox.CreateMock(self.compute_driver) + + dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref) + self.mox.StubOutWithMock(rpc, 'call') + rpc.call(c, FLAGS.volume_topic, + {"method": "check_for_export", + "args": {'instance_id': i_id}}).InAnyOrder('g1') + compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest) + dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic) + rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest), + {"method": "pre_live_migration", + "args": {'instance_id': i_id}}).\ + InAnyOrder('g1').AndRaise(rpc.RemoteError('du', 'mm', 'y')) + self.mox.StubOutWithMock(compute_manager.LOG, 'error') + compute_manager.LOG.error('Pre live migration for %s failed at %s', + instance_ref['hostname'], dest) + dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running') + for i in range(len(instance_ref['volumes'])): + vid = instance_ref['volumes'][i]['id'] + dbmock.volume_update(c, vid, {'status': 'in-use'}) + + self.compute.db = dbmock + self.compute.driver = drivermock + self.mox.ReplayAll() + self.assertRaises(rpc.RemoteError, + self.compute.live_migration, + c, i_id, dest) + self.mox.ResetAll() + + def test_live_migration_instance_has_no_volume_and_exception(self): + """Simpler than test_live_migration_instance_has_volume_and_exception""" + + instance_ref={'id':1, 'volumes':[], 'hostname':'i-000000001'} + dest='dummydest' + c = context.get_admin_context() + i_id = instance_ref['id'] + + self._setup_other_managers() + dbmock = self.mox.CreateMock(db) + drivermock = self.mox.CreateMock(self.compute_driver) + + dbmock.instance_get(c, instance_ref['id']).AndReturn(instance_ref) + self.mox.StubOutWithMock(rpc, 'call') + compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest) + dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic) + rpc.call(c, compute_topic, + {"method": "pre_live_migration", + "args": {'instance_id': i_id}}).\ + AndRaise(rpc.RemoteError('du', 'mm', 'y')) + self.mox.StubOutWithMock(compute_manager.LOG, 
'error')
+        compute_manager.LOG.error('Pre live migration for %s failed at %s',
+                                  instance_ref['hostname'], dest)
+        dbmock.instance_set_state(c, i_id, power_state.RUNNING, 'running')
+
+        self.compute.db = dbmock
+        self.compute.driver = drivermock
+        self.mox.ReplayAll()
+        self.assertRaises(rpc.RemoteError,
+                          self.compute.live_migration,
+                          c, i_id, dest)
+        self.mox.ResetAll()
+
+    def test_live_migration_works_correctly(self):
+        """Simpler version of test_live_migration_instance_has_volume."""
+        instance_ref={'id':1, 'volumes':[{'id':1}, {'id':2}],
+                      'hostname':'i-000000001'}
+        c = context.get_admin_context()
+        dest='dummydest'
+        i_id = instance_ref['id']
+
+        self._setup_other_managers()
+        dbmock = self.mox.CreateMock(db)
+        drivermock = self.mox.CreateMock(self.compute_driver)
+
+        dbmock.instance_get(c, i_id).AndReturn(instance_ref)
+        self.mox.StubOutWithMock(rpc, 'call')
+        rpc.call(c, FLAGS.volume_topic,
+                 {"method": "check_for_export",
+                  "args": {'instance_id': i_id}}).InAnyOrder('g1')
+        compute_topic = db.queue_get_for(c, FLAGS.compute_topic, dest)
+        dbmock.queue_get_for(c, FLAGS.compute_topic, dest).AndReturn(compute_topic)
+        rpc.call(c, compute_topic,
+                 {"method": "pre_live_migration",
+                  "args": {'instance_id': i_id}}).InAnyOrder('g1')
+        drivermock.live_migration(c, instance_ref, dest)
+
+        self.compute.db = dbmock
+        self.compute.driver = drivermock
+        self.mox.ReplayAll()
+        ret = self.compute.live_migration(c, i_id, dest)
+        self.assertEqual(ret, None)
+        self.mox.ResetAll()
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index 9d458244b042..c62bca9b1238 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -20,10 +20,12 @@ Tests For Scheduler
 """
 
 import datetime
+import mox
 from mox import IgnoreArg
 
 from nova import context
 from nova import db
+from nova import exception
 from nova import flags
 from nova import service
 from nova import test
@@ -32,6 +34,8 @@ from nova import utils
 from nova.auth import manager as auth_manager
 from nova.scheduler import manager
 from nova.scheduler import driver
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
 
 FLAGS = flags.FLAGS
@@ -75,7 +79,102 @@ class SchedulerTestCase(test.TestCase):
                   'args': {'num': 7}})
         self.mox.ReplayAll()
         scheduler.named_method(ctxt, 'topic', num=7)
+
+    def test_show_host_resource_host_not_exist(self):
+        """
+        A testcase of scheduler.show_host_resource:
+        the given host does not exist.
+ """ + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True) + manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\ + AndReturn([]) + + self.mox.ReplayAll() + result = scheduler.show_host_resource(ctxt, dest) + # ret should be dict + keys = ['ret', 'msg'] + c1 = list(set(result.keys())) == list(set(keys)) + c2 = not result['ret'] + c3 = result['msg'].find('No such Host or not compute node') <= 0 + self.assertTrue( c1 and c2 and c3) + self.mox.UnsetStubs() + + def test_show_host_resource_no_project(self): + """ + A testcase of driver.show_host_resource + no instance stays on the given host + """ + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100} + service_ref = {'id':1, 'host':dest} + service_ref.update(r0) + + self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True) + manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\ + AndReturn([(service_ref, 0)]) + manager.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([]) + + self.mox.ReplayAll() + result = scheduler.show_host_resource(ctxt, dest) + # ret should be dict + keys = ['ret', 'phy_resource', 'usage'] + c1 = list(set(result.keys())) == list(set(keys)) + c2 = result['ret'] + c3 = result['phy_resource'] == r0 + c4 = result['usage'] == {} + self.assertTrue( c1 and c2 and c3 and c4) + self.mox.UnsetStubs() + + def test_show_host_resource_works_correctly(self): + """ + A testcase of driver.show_host_resource + to make sure everything finished with no error. + """ + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100} + r1 = {'vcpus':10, 'memory_mb':4, 'local_gb':20} + r2 = {'vcpus':10, 'memory_mb':20, 'local_gb':30} + service_ref = {'id':1, 'host':dest} + service_ref.update(r0) + instance_ref2 = {'id':2, 'project_id':'p-01', 'host':'dummy'} + instance_ref2.update(r1) + instance_ref3 = {'id':3, 'project_id':'p-02', 'host':'dummy'} + instance_ref3.update(r1) + + self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True) + manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\ + AndReturn([(service_ref, 0)]) + manager.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([instance_ref2, instance_ref3]) + for p in ['p-01', 'p-02']: + manager.db.instance_get_vcpu_sum_by_host_and_project( + ctxt, dest, p).AndReturn(r2['vcpus']) + manager.db.instance_get_memory_sum_by_host_and_project( + ctxt, dest, p).AndReturn(r2['memory_mb']) + manager.db.instance_get_disk_sum_by_host_and_project( + ctxt, dest, p).AndReturn(r2['local_gb']) + + self.mox.ReplayAll() + result = scheduler.show_host_resource(ctxt, dest) + # ret should be dict + keys = ['ret', 'phy_resource', 'usage'] + c1 = list(set(result.keys())) == list(set(keys)) + c2 = result['ret'] + c3 = result['phy_resource'] == r0 + c4 = result['usage'].keys() == ['p-01', 'p-02'] + c5 = result['usage']['p-01'] == r2 + c6 = result['usage']['p-02'] == r2 + self.assertTrue( c1 and c2 and c3 and c4 and c5 and c6) + self.mox.UnsetStubs() class ZoneSchedulerTestCase(test.TestCase): """Test case for zone scheduler""" @@ -384,3 +483,626 @@ class SimpleDriverTestCase(test.TestCase): volume2.delete_volume(self.context, volume_id) volume1.kill() volume2.kill() + + def test_scheduler_live_migraiton_with_volume(self): + """ + driver.scheduler_live_migration 
finishes successfully + (volumes are attached to instances) + This testcase make sure schedule_live_migration + changes instance state from 'running' -> 'migrating' + """ + driver_i = self.scheduler.driver + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-00000001', 'host':'dummy', + 'volumes':[{'id':1}, {'id':2}]} + dest = 'dummydest' + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + # must be IgnoreArg() because scheduler changes ctxt's memory address + driver.db.instance_get(mox.IgnoreArg(), i_ref['id']).AndReturn(i_ref) + + self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') + driver_i._live_migration_src_check(mox.IgnoreArg(), i_ref) + self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') + driver_i._live_migration_dest_check(mox.IgnoreArg(), i_ref, dest) + self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') + driver_i._live_migration_common_check(mox.IgnoreArg(), i_ref, dest) + driver.db.instance_set_state(mox.IgnoreArg(), i_ref['id'], + power_state.PAUSED, 'migrating') + for v in i_ref['volumes']: + driver.db.volume_update(mox.IgnoreArg(), v['id'], + {'status': 'migrating'}) + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + kwargs={'instance_id':i_ref['id'], 'dest':dest} + rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']), + {"method": 'live_migration', + "args": kwargs}) + + self.mox.ReplayAll() + self.scheduler.live_migration(ctxt, topic, + instance_id=i_ref['id'], dest=dest) + self.mox.UnsetStubs() + + def test_scheduler_live_migraiton_no_volume(self): + """ + driver.scheduler_live_migration finishes successfully + (volumes are attached to instances) + This testcase make sure schedule_live_migration + changes instance state from 'running' -> 'migrating' + """ + driver_i = self.scheduler.driver + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[]} + dest = 'dummydest' + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + # must be IgnoreArg() because scheduler changes ctxt's memory address + driver.db.instance_get(mox.IgnoreArg(), i_ref['id']).AndReturn(i_ref) + self.mox.StubOutWithMock(driver_i, '_live_migration_src_check') + driver_i._live_migration_src_check(mox.IgnoreArg(), i_ref) + self.mox.StubOutWithMock(driver_i, '_live_migration_dest_check') + driver_i._live_migration_dest_check(mox.IgnoreArg(), i_ref, dest) + self.mox.StubOutWithMock(driver_i, '_live_migration_common_check') + driver_i._live_migration_common_check(mox.IgnoreArg(), i_ref, dest) + driver.db.instance_set_state(mox.IgnoreArg(), i_ref['id'], + power_state.PAUSED, 'migrating') + self.mox.StubOutWithMock(rpc, 'cast', use_mock_anything=True) + kwargs={'instance_id':i_ref['id'], 'dest':dest} + rpc.cast(ctxt, db.queue_get_for(ctxt, topic, i_ref['host']), + {"method": 'live_migration', + "args": kwargs}) + + self.mox.ReplayAll() + self.scheduler.live_migration(ctxt, topic, + instance_id=i_ref['id'], dest=dest) + self.mox.UnsetStubs() + + def test_live_migraiton_src_check_instance_not_running(self): + """ + A testcase of driver._live_migration_src_check. + The instance given by instance_id is not running. 
+        """
+        ctxt = context.get_admin_context()
+        topic = FLAGS.compute_topic
+        dest = 'dummydest'
+        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
+                 'volumes':[], 'state_description':'migrating',
+                 'state':power_state.RUNNING}
+
+        self.mox.ReplayAll()
+        try:
+            self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
+        except exception.Invalid, e:
+            self.assertTrue(e.message.find('is not running') > 0)
+        self.mox.UnsetStubs()
+
+    def test_live_migraiton_src_check_volume_node_not_alive(self):
+        """
+        A testcase of driver._live_migration_src_check.
+        The volume node is not alive although volumes are attached
+        to the given instance.
+        """
+        dest = 'dummydest'
+        ctxt = context.get_admin_context()
+        topic = FLAGS.compute_topic
+        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy',
+                 'volumes':[{'id':1}, {'id':2}],
+                 'state_description':'running', 'state':power_state.RUNNING}
+
+        self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+        driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'volume').\
+                                           AndReturn([])
+
+        self.mox.ReplayAll()
+        try:
+            self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
+        except exception.Invalid, e:
+            self.assertTrue(e.message.find('volume node is not alive') >= 0)
+        self.mox.UnsetStubs()
+
+    def test_live_migraiton_src_check_compute_node_not_alive(self):
+        """
+        A testcase of driver._live_migration_src_check.
+        The testcase makes sure the src compute node is alive.
+        """
+        dest = 'dummydest'
+        ctxt = context.get_admin_context()
+        topic = FLAGS.compute_topic
+        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[],
+                 'state_description':'running', 'state':power_state.RUNNING}
+
+        self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+        driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'compute').\
+                                           AndReturn([])
+
+        self.mox.ReplayAll()
+        try:
+            self.scheduler.driver._live_migration_src_check(ctxt, i_ref)
+        except exception.Invalid, e:
+            self.assertTrue(e.message.find('is not alive') >= 0)
+        self.mox.UnsetStubs()
+
+    def test_live_migraiton_src_check_works_correctly(self):
+        """
+        A testcase of driver._live_migration_src_check.
+        The testcase makes sure everything finishes with no error.
+        """
+        driver_i = self.scheduler.driver
+        dest = 'dummydest'
+        ctxt = context.get_admin_context()
+        topic = FLAGS.compute_topic
+        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'volumes':[],
+                 'state_description':'running', 'state':power_state.RUNNING}
+        service_ref = models.Service()
+        service_ref.__setitem__('id', 1)
+        service_ref.__setitem__('host', i_ref['host'])
+
+        self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True)
+        driver.db.service_get_all_by_topic(mox.IgnoreArg(), 'compute').\
+                                           AndReturn([service_ref])
+        self.mox.StubOutWithMock(driver_i, 'service_is_up')
+        driver_i.service_is_up(service_ref).AndReturn(True)
+
+        self.mox.ReplayAll()
+        ret = driver_i._live_migration_src_check(ctxt, i_ref)
+        self.assertTrue(ret == None)
+        self.mox.UnsetStubs()
+
+    def test_live_migraiton_dest_check_service_not_exists(self):
+        """
+        A testcase of driver._live_migration_dest_check.
+        Destination host does not exist.
+ """ + driver_i = self.scheduler.driver + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('host', i_ref['host']) + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([]) + + self.mox.ReplayAll() + try: + driver_i._live_migration_dest_check(ctxt, i_ref, dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('does not exists') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_dest_check_service_isnot_compute(self): + """ + A testcase of driver._live_migration_dst_check. + Destination host does not provide compute. + """ + driver_i = self.scheduler.driver + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('host', i_ref['host']) + service_ref.__setitem__('topic', 'api') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + + self.mox.ReplayAll() + try: + driver_i._live_migration_dest_check(ctxt, i_ref, dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('must be compute node') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_dest_check_service_not_alive(self): + """ + A testcase of driver._live_migration_dst_check. + Destination host compute service is not alive. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('host', i_ref['host']) + service_ref.__setitem__('topic', 'compute') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up') + self.scheduler.driver.service_is_up(service_ref).AndReturn(False) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('is not alive') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_dest_check_service_same_host(self): + """ + A testcase of driver._live_migration_dst_check. + Destination host is same as src host. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('host', i_ref['host']) + service_ref.__setitem__('topic', 'compute') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up') + self.scheduler.driver.service_is_up(service_ref).AndReturn(True) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('is running now. 
choose other host') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_dest_check_service_works_correctly(self): + """ + A testcase of driver._live_migration_dst_check. + The testcase make sure everything finished with no error. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummydest'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('host', i_ref['host']) + service_ref.__setitem__('topic', 'compute') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + self.mox.StubOutWithMock(self.scheduler.driver, 'service_is_up') + self.scheduler.driver.service_is_up(service_ref).AndReturn(True) + self.mox.StubOutWithMock(self.scheduler.driver, 'has_enough_resource') + self.scheduler.driver.has_enough_resource(mox.IgnoreArg(), i_ref, dest) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_dest_check(ctxt, i_ref, dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('is running now. choose other host') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_dest_not_exists(self): + """ + A testcase of driver._live_migration_common_check. + Destination host does not exist. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([]) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + except exception.Invalid, e: + self.assertTrue(e.message.find('does not exists') >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_orig_not_exists(self): + """ + A testcase of driver._live_migration_common_check. + Original host(an instance launched on) does not exist. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'launched_on':'h1'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('topic', 'compute') + service_ref.__setitem__('host', i_ref['host']) + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.service_get_all_by_host(mox.IgnoreArg(), + i_ref['launched_on']).\ + AndReturn([]) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + except exception.Invalid, e: + msg = 'where instance was launched at) does not exists' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_different_hypervisor(self): + """ + A testcase of driver._live_migration_common_check. + Original host and dest host has different hypervisor type. 
+ """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', + 'host':'dummy', 'launched_on':'h1'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('topic', 'compute') + service_ref.__setitem__('hypervisor_type', 'kvm') + service_ref2 = models.Service() + service_ref2.__setitem__('id', 2) + service_ref2.__setitem__('hypervisor_type', 'xen') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.service_get_all_by_host(mox.IgnoreArg(), + i_ref['launched_on']).\ + AndReturn([service_ref2]) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + except exception.Invalid, e: + msg = 'Different hypervisor type' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_different_version(self): + """ + A testcase of driver._live_migration_common_check. + Original host and dest host has different hypervisor version. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', + 'host':'dummy', 'launched_on':'h1'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('topic', 'compute') + service_ref.__setitem__('hypervisor_version', 12000) + service_ref2 = models.Service() + service_ref2.__setitem__('id', 2) + service_ref2.__setitem__('hypervisor_version', 12001) + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.service_get_all_by_host(mox.IgnoreArg(), + i_ref['launched_on']).\ + AndReturn([service_ref2]) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + except exception.Invalid, e: + msg = 'Older hypervisor version' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_checking_cpuinfo_fail(self): + """ + A testcase of driver._live_migration_common_check. + Original host and dest host has different hypervisor version. 
+ """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', + 'host':'dummy', 'launched_on':'h1'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('topic', 'compute') + service_ref.__setitem__('hypervisor_version', 12000) + service_ref2 = models.Service() + service_ref2.__setitem__('id', 2) + service_ref2.__setitem__('hypervisor_version', 12000) + service_ref2.__setitem__('cpuinfo', 'info') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.service_get_all_by_host(mox.IgnoreArg(), + i_ref['launched_on']).\ + AndReturn([service_ref2]) + driver.db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest) + self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) + driver.rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), + {"method": 'compare_cpu', + "args": {'cpu_info': service_ref2['cpu_info']}}).\ + AndRaise(rpc.RemoteError('doesnt have compatibility to', '', '')) + + self.mox.ReplayAll() + try: + self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + except rpc.RemoteError, e: + msg = 'doesnt have compatibility to' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_live_migraiton_common_check_service_works_correctly(self): + """ + A testcase of driver._live_migration_common_check. + The testcase make sure everything finished with no error. + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + i_ref = {'id':1, 'hostname':'i-01', + 'host':'dummy', 'launched_on':'h1'} + service_ref = models.Service() + service_ref.__setitem__('id', 1) + service_ref.__setitem__('topic', 'compute') + service_ref.__setitem__('hypervisor_version', 12000) + service_ref2 = models.Service() + service_ref2.__setitem__('id', 2) + service_ref2.__setitem__('hypervisor_version', 12000) + service_ref2.__setitem__('cpuinfo', 'info') + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.service_get_all_by_host(mox.IgnoreArg(), + i_ref['launched_on']).\ + AndReturn([service_ref2]) + driver.db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest) + self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) + driver.rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), + {"method": 'compare_cpu', + "args": {'cpu_info': service_ref2['cpu_info']}}) + + self.mox.ReplayAll() + ret = self.scheduler.driver._live_migration_common_check(ctxt, + i_ref, + dest) + self.assertTrue(ret == None) + self.mox.UnsetStubs() + + def test_has_enough_resource_lack_resource_vcpu(self): + """ + A testcase of driver.has_enough_resource. 
+ Lack of vcpu.(boundary check) + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} + i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'vcpus':6, 'memory_mb':8, 'local_gb':10} + i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([i_ref2, i_ref3]) + + self.mox.ReplayAll() + try: + self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + except exception.NotEmpty, e: + msg = 'is not capable to migrate' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_has_enough_resource_lack_resource_memory(self): + """ + A testcase of driver.has_enough_resource. + Lack of memory_mb.(boundary check) + """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} + i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':16, 'local_gb':10} + i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([i_ref2, i_ref3]) + + self.mox.ReplayAll() + try: + self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + except exception.NotEmpty, e: + msg = 'is not capable to migrate' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + self.mox.UnsetStubs() + + def test_has_enough_resource_lack_resource_disk(self): + """ + A testcase of driver.has_enough_resource. + Lack of local_gb.(boundary check) + """ + scheduler = manager.SchedulerManager() + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} + i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':80} + i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([i_ref2, i_ref3]) + + self.mox.ReplayAll() + try: + self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + except exception.NotEmpty, e: + msg = 'is not capable to migrate' + self.assertTrue(e.message.find(msg) >= 0) + self.mox.UnsetStubs() + + def test_has_enough_resource_works_correctly(self): + """ + A testcase of driver.has_enough_resource + to make sure everything finished with no error. 
+ """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} + i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', + 'vcpus':5, 'memory_mb':8, 'local_gb':10} + + self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) + driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([service_ref]) + driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ + AndReturn([i_ref2, i_ref3]) + + self.mox.ReplayAll() + ret = self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + self.assertTrue(ret == None) + self.mox.UnsetStubs() diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index a67c8d1e8c24..a147e69b4433 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -30,6 +30,7 @@ from nova import rpc from nova import test from nova import service from nova import manager +from nova.compute import manager as compute_manager FLAGS = flags.FLAGS flags.DEFINE_string("fake_manager", "nova.tests.test_service.FakeManager", @@ -41,7 +42,20 @@ class FakeManager(manager.Manager): def test_method(self): return 'manager' +# temporary variable to store host/binary/self.mox from each method to fake class. +global_host = None +global_binary = None +global_mox = None +class FakeComputeManager(compute_manager.ComputeManager): + """Fake computemanager manager for tests""" + + def __init__(self, compute_driver=None, *args, **kwargs): + global ghost, gbinary, gmox + self.update_service(mox.IgnoreArg(), mox.StrContains(ghost), mox.StrContains(gbinary)) + gmox.ReplayAll() + super(FakeComputeManager, self).__init__(compute_driver, *args, **kwargs) + class ExtendedService(service.Service): def test_method(self): return 'service' @@ -258,3 +272,48 @@ class ServiceTestCase(test.TestCase): serv.report_state() self.assert_(not serv.model_disconnected) + + def test_compute_can_update_services(self): + """ + Test nova-compute successfully updated Service table on DB. + Doing so, self.manager.update_service must be called + if 'self.binary == nova-compute', and this testcase checks on it. + """ + host = 'foo' + binary = 'nova-compute' + topic = 'compute1' + service_create = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'nova'} + service_ref = {'host': host, + 'binary': binary, + 'topic': topic, + 'report_count': 0, + 'availability_zone': 'nova', + 'id': 1} + + service.db.service_get_by_args(mox.IgnoreArg(), + host, + binary).AndRaise(exception.NotFound()) + service.db.service_create(mox.IgnoreArg(), + service_create).AndReturn(service_ref) + self.mox.StubOutWithMock(compute_manager.ComputeManager, 'update_service') + + + global ghost, gbinary, gmox + ghost = host + gbinary = binary + gmox = self.mox + + serv = service.Service(host, + binary, + topic, + 'nova.tests.test_service.FakeComputeManager') + # ReplayAll has been executed FakeComputeManager.__init__() + #self.mox.ReplayAll() + serv.start() + serv.stop() + + diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index afdc89ba2a7b..177e8f02192e 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -14,21 +14,29 @@ # License for the specific language governing permissions and limitations # under the License. 
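# ----------------------------------------------------------------------
# Illustrative sketch (editor's addition, not part of the patch): the
# cpu_info JSON that the test_virt.py testcases below exercise.
# get_cpu_info() is expected to emit a string in the format documented in
# nova/db/sqlalchemy/models.py above, and compare_cpu() hands it to
# libvirt's compareCPU(). All concrete values here are invented.
import json

sample_cpu_info = json.dumps({'arch': 'x86_64',
                              'model': 'Nehalem',
                              'vendor': 'Intel',
                              'topology': {'sockets': 2,
                                           'cores': 4,
                                           'threads': 2},
                              'features': ['tm', 'vmx']})
# A destination host passes the scheduler's check when compareCPU()
# reports that its CPU is a superset of this description (return > 0).
# ----------------------------------------------------------------------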
+import mox
+
 from xml.etree.ElementTree import fromstring as xml_to_tree
 from xml.dom.minidom import parseString as xml_to_dom
 
 from nova import context
 from nova import db
+from nova import exception
 from nova import flags
+from nova import log as logging
 from nova import test
 from nova import utils
 from nova.api.ec2 import cloud
 from nova.auth import manager
+from nova.compute import power_state
+from nova.db.sqlalchemy import models
 from nova.virt import libvirt_conn
 
 FLAGS = flags.FLAGS
 flags.DECLARE('instances_path', 'nova.compute.manager')
 
+libvirt = None
+libxml2 = None
 
 class LibvirtConnTestCase(test.TestCase):
     def setUp(self):
@@ -52,6 +60,38 @@ class LibvirtConnTestCase(test.TestCase):
                          'bridge': 'br101',
                          'instance_type': 'm1.small'}
 
+    def _driver_dependent_test_setup(self):
+        """
+        Setup method.
+        Call this method at the top of each testcase method if the
+        testcase requires the driver-dependent libraries
+        libvirt/libxml2/Cheetah.
+        """
+        try:
+            global libvirt
+            global libxml2
+            libvirt_conn.libvirt = __import__('libvirt')
+            libvirt_conn.libxml2 = __import__('libxml2')
+            libvirt_conn._late_load_cheetah()
+            libvirt = __import__('libvirt')
+        except ImportError, e:
+            logging.warn("""This test was skipped because it uses the """
+                         """driver-dependent libraries """
+                         """Cheetah/libvirt/libxml2.""")
+            raise e
+
+        # unavoidable mocks for calling
+        # nova.virt.libvirt_conn.LibvirtConnection.__init__
+        nwmock = self.mox.CreateMock(libvirt_conn.NWFilterFirewall)
+        self.mox.StubOutWithMock(libvirt_conn, 'NWFilterFirewall',
+                                 use_mock_anything=True)
+        libvirt_conn.NWFilterFirewall(mox.IgnoreArg()).AndReturn(nwmock)
+
+        obj = utils.import_object(FLAGS.firewall_driver)
+        fwmock = self.mox.CreateMock(obj)
+        self.mox.StubOutWithMock(libvirt_conn, 'utils',
+                                 use_mock_anything=True)
+        libvirt_conn.utils.import_object(FLAGS.firewall_driver).\
+                           AndReturn(fwmock)
+        return nwmock, fwmock
+
     def test_xml_and_uri_no_ramdisk_no_kernel(self):
         instance_data = dict(self.test_instance)
         self._check_xml_and_uri(instance_data,
@@ -188,9 +228,8 @@ class LibvirtConnTestCase(test.TestCase):
                                   expected_result,
                                   '%s failed common check %d' % (xml, i))
 
-    # This test is supposed to make sure we don't override a specifically
-    # set uri
-    #
+    # This test is supposed to make sure we don't override a specifically set uri
+    #
     # Deliberately not just assigning this string to FLAGS.libvirt_uri and
     # checking against that later on. This way we make sure the
     # implementation doesn't fiddle around with the FLAGS.
@@ -202,6 +241,480 @@ class LibvirtConnTestCase(test.TestCase):
         uri = conn.get_uri()
         self.assertEquals(uri, testuri)
 
+    def test_get_memory_mb(self):
+        """
+        Check if get_memory_mb returns a positive memory value.
+        Connection/OS/driver differences do not matter for this method,
+        so anyone can run this check.
+        """
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        self.assertTrue(0 < conn.get_memory_mb())
+        self.mox.UnsetStubs()
+
+    def test_get_cpu_info_works_correctly(self):
+        """
+        Check if get_cpu_info works correctly.
+        (in case libvirt.getCapabilities() works correctly)
+        """
+        xml = ("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
+               """<vendor>Intel</vendor>"""
+               """<topology sockets="2" cores="4" threads="2"/>"""
+               """<feature name="tm"/>"""
+               """<feature name="ht"/>"""
+               """<feature name="vmx"/>"""
+               """<feature name="est"/>"""
+               """<feature name="monitor"/>"""
+               """</cpu>""")
+
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+        libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        self.assertTrue(0 < len(conn.get_cpu_info()))
+        self.mox.UnsetStubs()
+
+    def test_get_cpu_info_inappropriate_xml(self):
+        """
+        Check if get_cpu_info raises an exception
+        in case libvirt.getCapabilities() returns wrong xml
+        (i.e. the xml doesn't have a <cpu> tag).
+        """
+        xml = ("""<host><arch>x86_64</arch><model>Nehalem</model>"""
+               """<vendor>Intel</vendor>"""
+               """<topology sockets="2" cores="4" threads="2"/>"""
+               """<feature name="tm"/>"""
+               """<feature name="ht"/>"""
+               """<feature name="vmx"/>"""
+               """<feature name="est"/>"""
+               """<feature name="monitor"/>"""
+               """</host>""")
+
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+        libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        try:
+            conn.get_cpu_info()
+        except exception.Invalid, e:
+            c1 = (0 <= e.message.find('Invalid xml'))
+            self.assertTrue(c1)
+        self.mox.UnsetStubs()
+
+    def test_get_cpu_info_inappropriate_xml2(self):
+        """
+        Check if get_cpu_info raises an exception
+        in case libvirt.getCapabilities() returns wrong xml
+        (i.e. the xml has an improper <topology> tag,
+         missing the "sockets" attribute).
+        """
+        xml = ("""<cpu><arch>x86_64</arch><model>Nehalem</model>"""
+               """<vendor>Intel</vendor>"""
+               """<topology cores="4" threads="2"/>"""
+               """<feature name="tm"/>"""
+               """<feature name="ht"/>"""
+               """<feature name="vmx"/>"""
+               """<feature name="est"/>"""
+               """<feature name="monitor"/>"""
+               """</cpu>""")
+
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+        libvirt_conn.LibvirtConnection._conn.getCapabilities().AndReturn(xml)
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        try:
+            conn.get_cpu_info()
+        except exception.Invalid, e:
+            c1 = (0 <= e.message.find('Invalid xml: topology'))
+            self.assertTrue(c1)
+        self.mox.UnsetStubs()
+
+    def test_compare_cpu_works_correctly(self):
+        """Check that libvirt.compareCPU() is called and works correctly."""
+
+        t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
+             """"topology":{"cores":"%s", "threads":"%s", """
+             """"sockets":"%s"}, "features":[%s]}""")
+        cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
+
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn', use_mock_anything=True)
+        libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), 0).AndReturn(1)
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        self.assertTrue(None == conn.compare_cpu(cpu_info))
+        self.mox.UnsetStubs()
+
+    def test_compare_cpu_raises_exception(self):
+        """
+        Check that a libvirt-related exception raised by
+        libvirt.compareCPU() is propagated.
+        """
+        t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
+             """"topology":{"cores":"%s", "threads":"%s", """
+             """"sockets":"%s"}, "features":[%s]}""")
+        cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
+
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+                                 use_mock_anything=True)
+        libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), 0).\
+            AndRaise(libvirt.libvirtError('ERR'))
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        self.assertRaises(libvirt.libvirtError, conn.compare_cpu, cpu_info)
+        self.mox.UnsetStubs()
+
+    def test_compare_cpu_no_compatibility(self):
+        """libvirt.compareCPU() reports no compatibility (return value <= 0)."""
+
+        t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
+             """"topology":{"cores":"%s", "threads":"%s", """
+             """"sockets":"%s"}, "features":[%s]}""")
+        cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"')
+
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+                                 use_mock_anything=True)
+        libvirt_conn.LibvirtConnection._conn.compareCPU(mox.IgnoreArg(), 0).\
+            AndRaise(exception.Invalid('ERR'))
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        self.assertRaises(exception.Invalid, conn.compare_cpu, cpu_info)
+        self.mox.UnsetStubs()
+
+    def test_ensure_filtering_rules_for_instance_works_correctly(self):
+        """ensure_filtering_rules_for_instance works as expected."""
+
+        instance_ref = models.Instance()
+        instance_ref.__setitem__('id', 1)
+
+        try:
+            nwmock, fwmock = self._driver_dependent_test_setup()
+        except:
+            return
+
+        nwmock.setup_basic_filtering(mox.IgnoreArg())
+        fwmock.prepare_instance_filter(instance_ref)
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+                                 use_mock_anything=True)
+        n = 'nova-instance-%s' % instance_ref.name
+        libvirt_conn.LibvirtConnection._conn.nwfilterLookupByName(n)
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        conn.ensure_filtering_rules_for_instance(instance_ref)
+        self.mox.UnsetStubs()
+
+    def test_ensure_filtering_rules_for_instance_timeout(self):
+        """ensure_filtering_rules_for_instance times out."""
+
+        instance_ref = models.Instance()
+        instance_ref.__setitem__('id', 1)
+
+        try:
+            nwmock, fwmock = self._driver_dependent_test_setup()
+        except:
+            return
+
+        nwmock.setup_basic_filtering(mox.IgnoreArg())
+        fwmock.prepare_instance_filter(instance_ref)
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+                                 use_mock_anything=True)
+        n = 'nova-instance-%s' % instance_ref.name
+        for i in range(FLAGS.live_migration_timeout_sec * 2):
+            libvirt_conn.LibvirtConnection._conn.\
+                nwfilterLookupByName(n).AndRaise(libvirt.libvirtError('ERR'))
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        try:
+            conn.ensure_filtering_rules_for_instance(instance_ref)
+        except exception.Error, e:
+            c1 = (0 <= e.message.find('Timeout migrating for'))
+            self.assertTrue(c1)
+        self.mox.UnsetStubs()
+
+    def test_live_migration_works_correctly(self):
+        """_live_migration works as expected."""
+
+        class dummyCall(object):
+            f = None
+            def start(self, interval=0, now=False):
+                pass
+
+        instance_ref = models.Instance()
+        instance_ref.__setitem__('id', 1)
+        dest = 'desthost'
+        ctxt = context.get_admin_context()
+
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+                                 use_mock_anything=True)
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, "migrateToURI",
+                                 use_mock_anything=True)
+        vdmock.migrateToURI(FLAGS.live_migration_uri % dest, mox.IgnoreArg(),
+                            None, FLAGS.live_migration_bandwidth).\
+            AndReturn(None)
+        libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\
+            AndReturn(vdmock)
+        # The stubbing below is also acceptable:
+        #self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection._conn,
+        #                         "lookupByName", use_mock_anything=True)
+
+        libvirt_conn.utils.LoopingCall(f=None).AndReturn(dummyCall())
+
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        ret = conn._live_migration(ctxt, instance_ref, dest)
+        self.assertTrue(ret == None)
+        self.mox.UnsetStubs()
+
+    def test_live_migration_raises_exception(self):
+        """
+        When _live_migration raises an exception, this testcase confirms
+        that state_description/state of the instance/volumes are recovered.
+        """
+        class Instance(models.NovaBase):
+            id = 0
+            volumes = None
+            name = 'name'
+
+        ctxt = context.get_admin_context()
+        dest = 'desthost'
+        instance_ref = Instance()
+        instance_ref.__setitem__('id', 1)
+        instance_ref.__setitem__('volumes', [{'id':1}, {'id':2}])
+
+        try:
+            nwmock, fwmock = self._driver_dependent_test_setup()
+        except:
+            return
+
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+                                 use_mock_anything=True)
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, "migrateToURI",
+                                 use_mock_anything=True)
+        vdmock.migrateToURI(FLAGS.live_migration_uri % dest, mox.IgnoreArg(),
+                            None, FLAGS.live_migration_bandwidth).\
+            AndRaise(libvirt.libvirtError('ERR'))
+        libvirt_conn.LibvirtConnection._conn.lookupByName(instance_ref.name).\
+            AndReturn(vdmock)
+        self.mox.StubOutWithMock(db, 'instance_set_state')
+        db.instance_set_state(ctxt, instance_ref['id'],
+                              power_state.RUNNING, 'running')
+        self.mox.StubOutWithMock(db, 'volume_update')
+        for v in instance_ref.volumes:
+            db.volume_update(ctxt, v['id'], {'status': 'in-use'}).\
+                InAnyOrder('g1')
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        self.assertRaises(libvirt.libvirtError,
+                          conn._live_migration,
+                          ctxt, instance_ref, dest)
+        self.mox.UnsetStubs()
+
+    def test_post_live_migration_working_correctly(self):
+        """_post_live_migration works as expected."""
+
+        dest = 'dummydest'
+        ctxt = context.get_admin_context()
+        instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
+                        'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
+                        'volumes':[{'id':1}, {'id':2}]}
+        network_ref = {'id':1, 'host':dest}
+        floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
+
+        try:
+            nwmock, fwmock = self._driver_dependent_test_setup()
+        except:
+            return
+        fwmock.unfilter_instance(instance_ref)
+
+        fixed_ip = instance_ref['fixed_ip']
+        self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
+        db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
+        self.mox.StubOutWithMock(db, 'fixed_ip_update')
+        db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+        self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
+        db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
+        self.mox.StubOutWithMock(db, 'network_update')
+        db.network_update(ctxt, network_ref['id'], {'host': dest})
+
+        fl_ip = instance_ref['floating_ip']
+        self.mox.StubOutWithMock(db, 'instance_get_floating_address')
+        db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(fl_ip)
+        self.mox.StubOutWithMock(db, 'floating_ip_get_by_address')
+        db.floating_ip_get_by_address(ctxt, instance_ref['floating_ip']).\
+            AndReturn(floating_ip_ref)
+        self.mox.StubOutWithMock(db, 'floating_ip_update')
+        db.floating_ip_update(ctxt, floating_ip_ref['address'], {'host': dest})
+
+        self.mox.StubOutWithMock(db, 'instance_update')
+        db.instance_update(ctxt, instance_ref['id'],
+                           {'state_description': 'running',
+                            'state': power_state.RUNNING, 'host': dest})
+        self.mox.StubOutWithMock(db, 'volume_update')
+        for v in instance_ref['volumes']:
+            db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        conn._post_live_migration(ctxt, instance_ref, dest)
+        self.mox.UnsetStubs()
+
+    def test_post_live_migration_no_floating_ip(self):
+        """
+        _post_live_migration works as expected
+        (in case the instance doesn't have a floating ip).
+        """
+        dest = 'dummydest'
+        ctxt = context.get_admin_context()
+        instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
+                        'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
+                        'volumes':[{'id':1}, {'id':2}]}
+        network_ref = {'id':1, 'host':dest}
+        floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
+
+        try:
+            nwmock, fwmock = self._driver_dependent_test_setup()
+        except:
+            return
+        fwmock.unfilter_instance(instance_ref)
+
+        fixed_ip = instance_ref['fixed_ip']
+        self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
+        db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
+        self.mox.StubOutWithMock(db, 'fixed_ip_update')
+        db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+        self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
+        db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
+        self.mox.StubOutWithMock(db, 'network_update')
+        db.network_update(ctxt, network_ref['id'], {'host': dest})
+
+        self.mox.StubOutWithMock(db, 'instance_get_floating_address')
+        db.instance_get_floating_address(ctxt, instance_ref['id']).AndReturn(None)
+        self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
+        libvirt_conn.LOG.info(_('post livemigration operation is started..'))
+        libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
+                              instance_ref['hostname'])
+        # The last messages are just ignored; maybe no need to check so strictly?
+        libvirt_conn.LOG.info(mox.IgnoreArg())
+        libvirt_conn.LOG.info(mox.IgnoreArg())
+
+        self.mox.StubOutWithMock(db, 'instance_update')
+        db.instance_update(ctxt, instance_ref['id'],
+                           {'state_description': 'running',
+                            'state': power_state.RUNNING,
+                            'host': dest})
+        self.mox.StubOutWithMock(db, 'volume_update')
+        for v in instance_ref['volumes']:
+            db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        conn._post_live_migration(ctxt, instance_ref, dest)
+        self.mox.UnsetStubs()
+
+    def test_post_live_migration_no_floating_ip_with_exception(self):
+        """
+        _post_live_migration works as expected
+        (the instance has no floating ip and an exception is raised).
+        """
+        dest = 'dummydest'
+        ctxt = context.get_admin_context()
+        instance_ref = {'id':1, 'hostname':'i-00000001', 'host':dest,
+                        'fixed_ip':'dummyip', 'floating_ip':'dummyflip',
+                        'volumes':[{'id':1}, {'id':2}]}
+        network_ref = {'id':1, 'host':dest}
+        floating_ip_ref = {'id':1, 'address':'1.1.1.1'}
+
+        try:
+            nwmock, fwmock = self._driver_dependent_test_setup()
+        except:
+            return
+        fwmock.unfilter_instance(instance_ref)
+
+        fixed_ip = instance_ref['fixed_ip']
+        self.mox.StubOutWithMock(db, 'instance_get_fixed_address')
+        db.instance_get_fixed_address(ctxt, instance_ref['id']).AndReturn(fixed_ip)
+        self.mox.StubOutWithMock(db, 'fixed_ip_update')
+        db.fixed_ip_update(ctxt, fixed_ip, {'host': dest})
+        self.mox.StubOutWithMock(db, 'fixed_ip_get_network')
+        db.fixed_ip_get_network(ctxt, fixed_ip).AndReturn(network_ref)
+        self.mox.StubOutWithMock(db, 'network_update')
+        db.network_update(ctxt, network_ref['id'], {'host': dest})
+
+        self.mox.StubOutWithMock(db, 'instance_get_floating_address')
+        db.instance_get_floating_address(ctxt, instance_ref['id']).\
+            AndRaise(exception.NotFound())
+        self.mox.StubOutWithMock(libvirt_conn.LOG, 'info')
+        libvirt_conn.LOG.info(_('post livemigration operation is started..'))
+        libvirt_conn.LOG.info(_('floating_ip is not found for %s'),
+                              instance_ref['hostname'])
+        # The last message is just ignored; maybe no need to check so strictly?
+        libvirt_conn.LOG.info(mox.IgnoreArg())
+        libvirt_conn.LOG.info(mox.IgnoreArg())
+
+        self.mox.StubOutWithMock(db, 'instance_update')
+        db.instance_update(ctxt, instance_ref['id'],
+                           {'state_description': 'running',
+                            'state': power_state.RUNNING, 'host': dest})
+        self.mox.StubOutWithMock(db, 'volume_update')
+        for v in instance_ref['volumes']:
+            db.volume_update(ctxt, v['id'], {'status': 'in-use'})
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        conn._post_live_migration(ctxt, instance_ref, dest)
+        self.mox.UnsetStubs()
+
     def tearDown(self):
         super(LibvirtConnTestCase, self).tearDown()
         self.manager.delete_project(self.project)
@@ -475,3 +988,4 @@ class NWFilterTestCase(test.TestCase):
         self.fw.prepare_instance_filter(instance)
         _ensure_all_called()
         self.teardown_security_group()
+
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index 80ae7f34c2d8..f469af681a66 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -316,15 +316,15 @@ class FakeConnection(object):
 
     def get_vcpu_number(self):
        """This method is supported only libvirt. """
-        return -1
+        return
 
     def get_memory_mb(self):
         """This method is supported only libvirt.."""
-        return -1
+        return
 
     def get_local_gb(self):
         """This method is supported only libvirt.."""
-        return -1
+        return
 
     def get_hypervisor_type(self):
         """This method is supported only libvirt.."""
@@ -332,12 +332,16 @@ class FakeConnection(object):
 
     def get_hypervisor_version(self):
         """This method is supported only libvirt.."""
-        return -1
+        return
 
     def compare_cpu(self, xml):
         """This method is supported only libvirt.."""
         raise NotImplementedError('This method is supported only libvirt.')
 
+    def ensure_filtering_rules_for_instance(self, instance_ref):
+        """This method is supported only by libvirt."""
+        raise NotImplementedError('This method is supported only libvirt.')
+
     def live_migration(self, context, instance_ref, dest):
         """This method is supported only libvirt.."""
         raise NotImplementedError('This method is supported only libvirt.')
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 7d1f76b327a1..49dd03c57c8e 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -861,18 +861,18 @@ class LibvirtConnection(object):
 
     def get_cpu_info(self):
         """ Get cpuinfo information """
-        xmlstr = self._conn.getCapabilities()
-        xml = libxml2.parseDoc(xmlstr)
+        xml = self._conn.getCapabilities()
+        xml = libxml2.parseDoc(xml)
         nodes = xml.xpathEval('//cpu')
         if len(nodes) != 1:
-            msg = 'Unexpected xml format. tag "cpu" must be 1, but %d.' \
-                      % len(nodes)
+            msg = 'Invalid xml. "<cpu>" must be 1, but %d.' % len(nodes)
             msg += '\n' + xml.serialize()
             raise exception.Invalid(_(msg))
 
-        arch = xml.xpathEval('//cpu/arch')[0].getContent()
-        model = xml.xpathEval('//cpu/model')[0].getContent()
-        vendor = xml.xpathEval('//cpu/vendor')[0].getContent()
+        cpu_info = dict()
+        cpu_info['arch'] = xml.xpathEval('//cpu/arch')[0].getContent()
+        cpu_info['model'] = xml.xpathEval('//cpu/model')[0].getContent()
+        cpu_info['vendor'] = xml.xpathEval('//cpu/vendor')[0].getContent()
 
         topology_node = xml.xpathEval('//cpu/topology')[0].get_properties()
         topology = dict()
@@ -890,18 +890,19 @@ class LibvirtConnection(object):
         feature_nodes = xml.xpathEval('//cpu/feature')
         features = list()
         for nodes in feature_nodes:
-            feature_name = nodes.get_properties().getContent()
-            features.append(feature_name)
+            features.append(nodes.get_properties().getContent())
 
         template = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
                     """"topology":{"cores":"%s", "threads":"%s", """
                     """"sockets":"%s"}, "features":[%s]}""")
-        c = topology['cores']
-        s = topology['sockets']
-        t = topology['threads']
         f = ['"%s"' % x for x in features]
-        cpu_info = template % (arch, model, vendor, c, s, t, ', '.join(f))
-        return cpu_info
+        return template % (cpu_info['arch'],
+                           cpu_info['model'],
+                           cpu_info['vendor'],
+                           topology['cores'],
+                           topology['sockets'],
+                           topology['threads'],
+                           ', '.join(f))
 
     def block_stats(self, instance_name, disk):
         """
@@ -935,12 +936,12 @@ class LibvirtConnection(object):
 
     def compare_cpu(self, cpu_info):
         """
-        Check the host cpu is compatible to a cpu given by xml.
-        "xml" must be a part of libvirt.openReadonly().getCapabilities().
-        return values follows by virCPUCompareResult.
-        if 0 > return value, do live migration.
+        Check if the host cpu is compatible with a cpu given by xml.
+        "xml" must be a part of libvirt.openReadonly().getCapabilities().
+        The return value follows virCPUCompareResult;
+        if the return value is greater than 0, do live migration.
-        'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
+        'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
         """
         msg = _('Checking cpu_info: instance was launched with this cpu.\n: %s ')
         LOG.info(msg % cpu_info)
@@ -952,7 +953,7 @@ class LibvirtConnection(object):
             url = 'http://libvirt.org/html/libvirt-libvirt.html'
             url += '#virCPUCompareResult\n'
             msg = 'CPU does not have compatibility.\n'
-            msg += 'result:%d \n'
+            msg += 'result:%s \n'
             msg += 'Refer to %s'
             msg = _(msg)
 
@@ -960,7 +961,7 @@ class LibvirtConnection(object):
         try:
             ret = self._conn.compareCPU(xml, 0)
         except libvirt.libvirtError, e:
-            LOG.error(msg % (ret, url))
+            LOG.error(msg % (e.message, url))
             raise e
 
         if ret <= 0:
@@ -969,24 +970,26 @@ class LibvirtConnection(object):
         return
 
     def ensure_filtering_rules_for_instance(self, instance_ref):
-        """ Setting up inevitable filtering rules on compute node,
-            and waiting for its completion.
-            To migrate an instance, filtering rules to hypervisors
-            and firewalls are inevitable on destination host.
-            ( Waiting only for filterling rules to hypervisor,
-            since filtering rules to firewall rules can be set faster).
+        """
+        Setting up inevitable filtering rules on the compute node,
+        and waiting for their completion.
+        To migrate an instance, filtering rules for hypervisors
+        and firewalls are inevitable on the destination host.
+        (Waiting only for the filtering rules to the hypervisor,
+        since firewall rules can be set faster.)
 
-            Concretely, the below method must be called.
-            - setup_basic_filtering (for nova-basic, etc.)
-            - prepare_instance_filter(for nova-instance-instance-xxx, etc.)
+        Concretely, the below methods must be called:
+        - setup_basic_filtering (for nova-basic, etc.)
+        - prepare_instance_filter (for nova-instance-instance-xxx, etc.)
 
-            to_xml may have to be called since it defines PROJNET, PROJMASK.
-            but libvirt migrates those value through migrateToURI(),
-            so , no need to be called.
+        to_xml may have to be called since it defines PROJNET/PROJMASK,
+        but libvirt migrates those values through migrateToURI(),
+        so it need not be called.
 
-            Don't use thread for this method since migration should
-            not be started when setting-up filtering rules operations
-            are not completed."""
+        Don't use a thread for this method, since migration should
+        not be started while the filtering-rule setup operations
+        are not completed.
+        """
 
         # If any instances never launch at destination host,
         # basic-filtering must be set here.
@@ -1009,40 +1012,44 @@ class LibvirtConnection(object):
             raise exception.Error(msg % (ec2_id, instance_ref.name))
         time.sleep(0.5)
 
-    def live_migration(self, context, instance_ref, dest):
+    def live_migration(self, ctxt, instance_ref, dest):
         """
-        Just spawning live_migration operation for
-        distributing high-load.
+        Just spawn the live_migration operation,
+        to distribute the high load.
         """
-        greenthread.spawn(self._live_migration, context, instance_ref, dest)
+        greenthread.spawn(self._live_migration, ctxt, instance_ref, dest)
 
-    def _live_migration(self, context, instance_ref, dest):
+    def _live_migration(self, ctxt, instance_ref, dest):
         """ Do live migration."""
 
         # Do live migration.
try: - duri = FLAGS.live_migration_uri % dest - flaglist = FLAGS.live_migration_flag.split(',') flagvals = [getattr(libvirt, x.strip()) for x in flaglist] logical_sum = reduce(lambda x, y: x | y, flagvals) - bandwidth = FLAGS.live_migration_bandwidth - if self.read_only: tmpconn = self._connect(self.libvirt_uri, False) dom = tmpconn.lookupByName(instance_ref.name) - dom.migrateToURI(duri, logical_sum, None, bandwidth) + dom.migrateToURI(FLAGS.live_migration_uri % dest, + logical_sum, + None, + FLAGS.live_migration_bandwidth) tmpconn.close() else: dom = self._conn.lookupByName(instance_ref.name) - dom.migrateToURI(duri, logical_sum, None, bandwidth) + dom.migrateToURI(FLAGS.live_migration_uri % dest, + logical_sum, + None, + FLAGS.live_migration_bandwidth) except Exception, e: - id = instance_ref['id'] - db.instance_set_state(context, id, power_state.RUNNING, 'running') + db.instance_set_state(ctxt, + instance_ref['id'], + power_state.RUNNING, + 'running') for v in instance_ref['volumes']: - db.volume_update(context, + db.volume_update(ctxt, v['id'], {'status': 'in-use'}) @@ -1052,20 +1059,20 @@ class LibvirtConnection(object): timer = utils.LoopingCall(f=None) def wait_for_live_migration(): - + """waiting for live migration completion""" try: - state = self.get_info(instance_ref.name)['state'] + self.get_info(instance_ref.name)['state'] except exception.NotFound: timer.stop() - self._post_live_migration(context, instance_ref, dest) + self._post_live_migration(ctxt, instance_ref, dest) timer.f = wait_for_live_migration timer.start(interval=0.5, now=True) - def _post_live_migration(self, context, instance_ref, dest): + def _post_live_migration(self, ctxt, instance_ref, dest): """ - Post operations for live migration. - Mainly, database updating. + Post operations for live migration. + Mainly, database updating. """ LOG.info('post livemigration operation is started..') # Detaching volumes. @@ -1079,61 +1086,61 @@ class LibvirtConnection(object): 'nova.virt.libvirt_conn.IptablesFirewallDriver': try: self.firewall_driver.unfilter_instance(instance_ref) - except KeyError, e: + except KeyError: pass # Database updating. ec2_id = instance_ref['hostname'] instance_id = instance_ref['id'] - fixed_ip = db.instance_get_fixed_address(context, instance_id) + fixed_ip = db.instance_get_fixed_address(ctxt, instance_id) # Not return if fixed_ip is not found, otherwise, # instance never be accessible.. if None == fixed_ip: logging.warn('fixed_ip is not found for %s ' % ec2_id) - db.fixed_ip_update(context, fixed_ip, {'host': dest}) - network_ref = db.fixed_ip_get_network(context, fixed_ip) - db.network_update(context, network_ref['id'], {'host': dest}) + db.fixed_ip_update(ctxt, fixed_ip, {'host': dest}) + network_ref = db.fixed_ip_get_network(ctxt, fixed_ip) + db.network_update(ctxt, network_ref['id'], {'host': dest}) try: floating_ip \ - = db.instance_get_floating_address(context, instance_id) + = db.instance_get_floating_address(ctxt, instance_id) # Not return if floating_ip is not found, otherwise, # instance never be accessible.. if None == floating_ip: - logging.error('floating_ip is not found for %s ' % ec2_id) + LOG.info(_('floating_ip is not found for %s'), ec2_id) else: - floating_ip_ref = db.floating_ip_get_by_address(context, + floating_ip_ref = db.floating_ip_get_by_address(ctxt, floating_ip) - db.floating_ip_update(context, + db.floating_ip_update(ctxt, floating_ip_ref['address'], {'host': dest}) except exception.NotFound: - logging.debug('%s doesnt have floating_ip.. 
' % ec2_id)
+            LOG.info(_('floating_ip is not found for %s'), ec2_id)
         except:
-            msg = 'Live migration: Unexpected error:'
-            msg += '%s cannot inherit floating ip.. ' % ec2_id
-            logging.error(_(msg))
+            msg = ("""Live migration: Unexpected error: """
+                   """%s cannot inherit the floating ip.""")
+            LOG.error(_(msg), ec2_id)
 
         # Restore instance/volume state
-        db.instance_update(context,
+        db.instance_update(ctxt,
                            instance_id,
                            {'state_description': 'running',
                             'state': power_state.RUNNING,
                             'host': dest})
 
         for v in instance_ref['volumes']:
-            db.volume_update(context,
+            db.volume_update(ctxt,
                              v['id'],
                              {'status': 'in-use'})
 
-        logging.info(_('Live migrating %s to %s finishes successfully')
+        LOG.info(_('Live migration of %s to %s finished successfully')
                      % (ec2_id, dest))
         msg = _(("""Known error: the below error normally occurs.\n"""
                  """Just check if the instance is successfully migrated.\n"""
                  """libvir: QEMU error : Domain not found: no domain """
                  """with matching name..."""))
-        logging.info(msg)
+        LOG.info(msg)
 
 
 class FirewallDriver(object):
diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index c10f73fe7164..1e7933f51455 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -215,15 +215,15 @@ class XenAPIConnection(object):
 
     def get_vcpu_number(self):
         """This method is supported only libvirt. """
-        return -1
+        return
 
     def get_memory_mb(self):
         """This method is supported only libvirt.."""
-        return -1
+        return
 
     def get_local_gb(self):
         """This method is supported only libvirt.."""
-        return -1
+        return
 
     def get_hypervisor_type(self):
         """This method is supported only libvirt.."""
@@ -231,12 +231,18 @@ class XenAPIConnection(object):
 
     def get_hypervisor_version(self):
         """This method is supported only libvirt.."""
-        return -1
+        return
 
     def compare_cpu(self, xml):
+        """This method is supported only by libvirt."""
+        raise NotImplementedError('This method is supported only libvirt.')
+
+    def ensure_filtering_rules_for_instance(self, instance_ref):
+        """This method is supported only by libvirt."""
         raise NotImplementedError('This method is supported only libvirt.')
 
     def live_migration(self, context, instance_ref, dest):
+        """This method is supported only by libvirt."""
         raise NotImplementedError('This method is supported only libvirt.')
 
diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 1735d79ebc21..906eb86ea287 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -153,6 +153,6 @@ class VolumeManager(manager.Manager):
     def check_for_export(self, context, instance_id):
         """Make sure whether volume is exported."""
         if FLAGS.volume_driver == 'nova.volume.driver.AOEDriver':
-            instance_ref = self.db.instance_get(instance_id)
+            instance_ref = self.db.instance_get(context, instance_id)
             for v in instance_ref['volumes']:
                 self.driver.check_for_export(context, v['id'])

From 09f2c4729456443c4874a8cadc53299817d6371a Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Mon, 31 Jan 2011 18:41:10 +0900
Subject: [PATCH 02/76] 1. Discard nova-manage host list

Reason: nova-manage service list can replace it.
Changes: nova-manage

2. Fix the inappropriate design of resource checking.
Reason:
    nova.scheduler.driver.has_enough_resource has an inappropriate
    design, so fix it. This method did not check free memory, only
    total memory. We need to register free memory in the database
    (periodically). But periodic updates may flood the db with
    requests when there are many compute nodes.
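    (For illustration, with hypothetical numbers: a 10 second update
    period across 1000 compute nodes already means about 100 of these
    bookkeeping updates per second against the db in aggregate.)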
    Currently, since memory information is only used by this feature,
    we take the choice that administrators have to manually execute
    nova-manage to let compute nodes update their own memory
    information.
Changes:
    nova.db.sqlalchemy.models
    - Adding memory_mb_used, local_gb_used and vcpus_used columns to
      Service. (local_gb and vcpus are just for admins' reference
      for now.)

    nova.compute.manager
    - Changing nova.compute.manager.update_service.
      The Service table columns have changed, so the updating method
      must be changed as well.
    - Adding nova.compute.manager.update_available_resource,
      a responder to admins' requests that lets compute nodes update
      their own memory information.

    nova.virt.libvirt_conn
    nova.virt.xenapi_conn
    nova.virt.fake
    - Adding getter methods for memory_mb_used/local_gb_used/vcpus_used.

    nova-manage
    - A request method to let compute nodes update their own memory
      info.
---
 bin/nova-manage              |  92 ++++++------
 nova/compute/manager.py      |  54 +++++++-
 nova/db/sqlalchemy/models.py |   5 +-
 nova/rpc.py                  |   3 +
 nova/scheduler/driver.py     |  67 ++++++---
 nova/scheduler/manager.py    |  11 +-
 nova/tests/test_compute.py   |  55 ++++++--
 nova/tests/test_scheduler.py | 203 ++++++++++++++++--------------
 nova/tests/test_virt.py      |  86 ++++++++++++-
 nova/utils.py                |  18 ++++
 nova/virt/fake.py            |  22 +++-
 nova/virt/libvirt_conn.py    |  37 +++++--
 nova/virt/xenapi_conn.py     |  22 +++-
 13 files changed, 490 insertions(+), 185 deletions(-)

diff --git a/bin/nova-manage b/bin/nova-manage
index 1ad3120b8e0e..2831e273e43a 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -499,50 +499,6 @@ class InstanceCommands(object):
             print msg
 
 
-class HostCommands(object):
-    """Class for mangaging host(physical nodes)."""
-
-    def list(self):
-        """describe host list."""
-
-        # To supress msg: No handlers could be found for logger "amqplib"
-        logging.basicConfig()
-
-        service_refs = db.service_get_all(context.get_admin_context())
-        hosts = [h['host'] for h in service_refs]
-        hosts = list(set(hosts))
-        for host in hosts:
-            print host
-
-    def show(self, host):
-        """describe cpu/memory/hdd info for host."""
-
-        result = rpc.call(context.get_admin_context(),
-                          FLAGS.scheduler_topic,
-                          {"method": "show_host_resource",
-                           "args": {"host": host}})
-
-        # Checking result msg format is necessary, that will have done
-        # when this feture is included in API.
-        if type(result) != dict:
-            print 'Unexpected error occurs'
-        elif not result['ret']:
-            print '%s' % result['msg']
-        else:
-            cpu = result['phy_resource']['vcpus']
-            mem = result['phy_resource']['memory_mb']
-            hdd = result['phy_resource']['local_gb']
-
-            print 'HOST\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)'
-            print '%s\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd)
-            for p_id, val in result['usage'].items():
-                print '%s\t%s\t\t%s\t%s\t%s' % (host,
-                                                p_id,
-                                                val['vcpus'],
-                                                val['memory_mb'],
-                                                val['local_gb'])
-
-
 class ServiceCommands(object):
     """Enable and disable running services"""
 
@@ -587,6 +543,53 @@ class ServiceCommands(object):
             return
         db.service_update(ctxt, svc['id'], {'disabled': True})
 
+    def describeresource(self, host):
+        """Describe cpu/memory/hdd info for host."""
+
+        result = rpc.call(context.get_admin_context(),
+                          FLAGS.scheduler_topic,
+                          {"method": "show_host_resource",
+                           "args": {"host": host}})
+
+        # Checking the result msg format is necessary; that will be done
+        # when this feature is included in the API.
+        if type(result) != dict:
+            print 'An unexpected error occurred.'
+        elif not result['ret']:
+            print '%s' % result['msg']
+        else:
+            cpu = result['phy_resource']['vcpus']
+            mem = result['phy_resource']['memory_mb']
+            hdd = result['phy_resource']['local_gb']
+            cpu_u = result['phy_resource']['vcpus_used']
+            mem_u = result['phy_resource']['memory_mb_used']
+            hdd_u = result['phy_resource']['local_gb_used']
+
+            print 'HOST\t\t\tPROJECT\t\tcpu\tmem(mb)\tdisk(gb)'
+            print '%s(total)\t\t\t%s\t%s\t%s' % (host, cpu, mem, hdd)
+            print '%s(used)\t\t\t%s\t%s\t%s' % (host, cpu_u, mem_u, hdd_u)
+            for p_id, val in result['usage'].items():
+                print '%s\t\t%s\t\t%s\t%s\t%s' % (host,
+                                                  p_id,
+                                                  val['vcpus'],
+                                                  val['memory_mb'],
+                                                  val['local_gb'])
+
+    def updateresource(self, host):
+        """Update available vcpu/memory/disk info for host."""
+
+        ctxt = context.get_admin_context()
+        service_refs = db.service_get_all_by_host(ctxt, host)
+        if len(service_refs) <= 0:
+            raise exception.Invalid(_('%s does not exist.') % host)
+
+        service_refs = [s for s in service_refs if s['topic'] == 'compute']
+        if len(service_refs) <= 0:
+            raise exception.Invalid(_('%s is not a compute node.') % host)
+
+        rpc.call(ctxt, db.queue_get_for(ctxt, FLAGS.compute_topic, host),
+                 {"method": "update_available_resource"})
+
 
 class LogCommands(object):
     def request(self, request_id, logfile='/var/log/nova.log'):
@@ -606,7 +609,6 @@ CATEGORIES = [
     ('floating', FloatingIpCommands),
     ('network', NetworkCommands),
     ('instance', InstanceCommands),
-    ('host', HostCommands),
     ('service', ServiceCommands),
     ('log', LogCommands)]
 
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index 4acba71530d5..e3c5d24b64ee 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -134,9 +134,12 @@ class ComputeManager(manager.Manager):
             raise exception.Invalid(msg)
 
         # Updating host information
-        vcpu = self.driver.get_vcpu_number()
-        memory_mb = self.driver.get_memory_mb()
-        local_gb = self.driver.get_local_gb()
+        vcpu = self.driver.get_vcpu_total()
+        memory_mb = self.driver.get_memory_mb_total()
+        local_gb = self.driver.get_local_gb_total()
+        vcpu_u = self.driver.get_vcpu_used()
+        memory_mb_u = self.driver.get_memory_mb_used()
+        local_gb_u = self.driver.get_local_gb_used()
         hypervisor = self.driver.get_hypervisor_type()
         version = self.driver.get_hypervisor_version()
         cpu_info = self.driver.get_cpu_info()
@@ -146,10 +149,42 @@ class ComputeManager(manager.Manager):
                                {'vcpus': vcpu,
                                 'memory_mb': memory_mb,
                                 'local_gb': local_gb,
+                                'vcpus_used': vcpu_u,
+                                'memory_mb_used': memory_mb_u,
+                                'local_gb_used': local_gb_u,
                                 'hypervisor_type': hypervisor,
                                 'hypervisor_version': version,
                                 'cpu_info': cpu_info})
 
+    def update_available_resource(self, context):
+        """
+        Update compute-node-specific info in the DB.
+        Although this might be a subset of update_service(),
+        update_service() is used only when nova-compute is launched.
+        On the other hand, this method is used whenever an
+        administrator's request comes in.
+        """
+        try:
+            service_ref = self.db.service_get_by_args(context,
+                                                      self.host,
+                                                      'nova-compute')
+        except exception.NotFound:
+            msg = _(("""Cannot update resource info."""
+                     """ No service record was found."""))
+            raise exception.Invalid(msg)
+
+        # Updating host information
+        vcpu_u = self.driver.get_vcpu_used()
+        memory_mb_u = self.driver.get_memory_mb_used()
+        local_gb_u = self.driver.get_local_gb_used()
+
+        self.db.service_update(context,
+                               service_ref['id'],
+                               {'vcpus_used': vcpu_u,
+                                'memory_mb_used': memory_mb_u,
+                                'local_gb_used': local_gb_u})
+        return
+
     def _update_state(self, context, instance_id):
         """Update the state of an instance from the driver info."""
         # FIXME(ja): include other fields from state?
@@ -596,6 +631,19 @@ class ComputeManager(manager.Manager):
         """ Check the host cpu is compatible to a cpu given by xml."""
         return self.driver.compare_cpu(cpu_info)
 
+    def mktmpfile(self, context):
+        """Make a tmpfile under FLAGS.instances_path."""
+        return utils.mktmpfile(FLAGS.instances_path)
+
+    def exists(self, context, path):
+        """Confirm existence of the tmpfile given by path."""
+        if not utils.exists(path):
+            raise exception.NotFound(_('%s not found') % path)
+
+    def remove(self, context, path):
+        """Remove the tmpfile given by path."""
+        return utils.remove(path)
+
     def pre_live_migration(self, context, instance_id):
         """Any preparation for live migration at dst host."""
 
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 7c40d55968a0..217b14bf7efa 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -164,7 +164,10 @@ class Service(BASE, NovaBase):
     vcpus = Column(Integer, nullable=True)
     memory_mb = Column(Integer, nullable=True)
     local_gb = Column(Integer, nullable=True)
-    hypervisor_type = Column(String(128), nullable=True)
+    vcpus_used = Column(Integer, nullable=True)
+    memory_mb_used = Column(Integer, nullable=True)
+    local_gb_used = Column(Integer, nullable=True)
+    hypervisor_type = Column(Text(), nullable=True)
     hypervisor_version = Column(Integer, nullable=True)
     # Note(masumotok): Expected Strings example:
     #
diff --git a/nova/rpc.py b/nova/rpc.py
index 49b11602bdeb..cf40040792da 100644
--- a/nova/rpc.py
+++ b/nova/rpc.py
@@ -250,6 +250,9 @@ def msg_reply(msg_id, reply=None, failure=None):
     try:
         publisher.send({'result': reply, 'failure': failure})
     except TypeError:
+        print '>>>>>>>>>>>>>>>>>>'
+        print reply
+        print '>>>>>>>>>>>>>>>>>>'
         publisher.send(
                 {'result': dict((k, repr(v))
                 for k, v in reply.__dict__.iteritems()),
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index d4ad4238859d..937f09c6fc18 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -33,6 +33,7 @@ from nova.compute import power_state
 FLAGS = flags.FLAGS
 flags.DEFINE_integer('service_down_time', 60,
                      'maximum time since last checkin for up service')
+flags.DECLARE('instances_path', 'nova.compute.manager')
 
 
 class NoValidHost(exception.Error):
@@ -163,6 +164,8 @@ class Scheduler(object):
             http://wiki.libvirt.org/page/TodoPreMigrationChecks
 
         """
+        # Checking shared storage connectivity
+        self.mounted_on_same_shared_storage(context, instance_ref, dest)
 
         # Checking dest exists.
         dservice_refs = db.service_get_all_by_host(context, dest)
@@ -207,38 +210,60 @@ class Scheduler(object):
             raise e
 
     def has_enough_resource(self, context, instance_ref, dest):
-        """Check if destination host has enough resource for live migration"""
+        """
+        Check if the destination host has enough resources for live migration.
+        Currently, only memory checking is done. If storage migration
+        (block migration, meaning live migration without any shared
+        storage) becomes available, local storage checking will also
+        be necessary.
+        """
 
         # Getting instance information
         ec2_id = instance_ref['hostname']
-        vcpus = instance_ref['vcpus']
         mem = instance_ref['memory_mb']
-        hdd = instance_ref['local_gb']
 
-        # Gettin host information
+        # Getting host information
         service_refs = db.service_get_all_by_host(context, dest)
         if len(service_refs) <= 0:
             raise exception.Invalid(_('%s does not exists.') % dest)
         service_ref = service_refs[0]
 
-        total_cpu = int(service_ref['vcpus'])
-        total_mem = int(service_ref['memory_mb'])
-        total_hdd = int(service_ref['local_gb'])
+        mem_total = int(service_ref['memory_mb'])
+        mem_used = int(service_ref['memory_mb_used'])
+        mem_avail = mem_total - mem_used
+        mem_inst = instance_ref['memory_mb']
+        if mem_avail <= mem_inst:
+            msg = _('%s is not capable to migrate %s (host:%s <= instance:%s)')
+            raise exception.NotEmpty(msg % (dest, ec2_id, mem_avail, mem_inst))
 
-        instances_refs = db.instance_get_all_by_host(context, dest)
-        for i_ref in instances_refs:
-            total_cpu -= int(i_ref['vcpus'])
-            total_mem -= int(i_ref['memory_mb'])
-            total_hdd -= int(i_ref['local_gb'])
+    def mounted_on_same_shared_storage(self, context, instance_ref, dest):
+        """
+        Check if /nova-inst-dir/instances is mounted on the same shared
+        storage at both the live-migration src and dest hosts.
+        """
+        src = instance_ref['host']
+        dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
+        src_t = db.queue_get_for(context, FLAGS.compute_topic, src)
 
-        # Checking host has enough information
-        logging.debug(_('host(%s) remains vcpu:%s mem:%s hdd:%s,') %
-                      (dest, total_cpu, total_mem, total_hdd))
-        logging.debug(_('instance(%s) has vcpu:%s mem:%s hdd:%s,') %
-                      (ec2_id, vcpus, mem, hdd))
+        # Create a tmpfile at the dest host.
+        try:
+            filename = rpc.call(context, dst_t, {"method": 'mktmpfile'})
+        except rpc.RemoteError, e:
+            msg = _("Cannot create tmpfile at %s to confirm shared storage.")
+            logging.error(msg % FLAGS.instances_path)
+            raise e
 
-        if total_cpu <= vcpus or total_mem <= mem or total_hdd <= hdd:
-            raise exception.NotEmpty(_('%s is not capable to migrate %s') %
-                                     (dest, ec2_id))
+        # Make sure the file exists at the src host.
+        try:
+            rpc.call(context, src_t,
+                     {"method": 'exists', "args": {'path': filename}})
 
-        logging.debug(_('%s has_enough_resource() for %s') % (dest, ec2_id))
+        except (rpc.RemoteError, exception.NotFound), e:
+            msg = (_("""Cannot confirm tmpfile at %s is on the same """
+                     """shared storage."""))
+            logging.error(msg % FLAGS.instances_path)
+            raise e
+
+        # Then remove it.
+ rpc.call(context, dst_t, + {"method": 'remove', "args":{'path':filename}}) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index a181225a6798..b40f46a8561b 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -84,7 +84,10 @@ class SchedulerManager(manager.Manager): # Getting physical resource information h_resource = {'vcpus': service_ref['vcpus'], 'memory_mb': service_ref['memory_mb'], - 'local_gb': service_ref['local_gb']} + 'local_gb': service_ref['local_gb'], + 'vcpus_used': service_ref['vcpus_used'], + 'memory_mb_used': service_ref['memory_mb_used'], + 'local_gb_used': service_ref['local_gb_used']} # Getting usage resource information u_resource = {} @@ -108,8 +111,8 @@ class SchedulerManager(manager.Manager): hdd = db.instance_get_disk_sum_by_host_and_project(context, host, p_id) - u_resource[p_id] = {'vcpus': vcpus, - 'memory_mb': mem, - 'local_gb': hdd} + u_resource[p_id] = {'vcpus': int(vcpus), + 'memory_mb': int(mem), + 'local_gb': int(hdd)} return {'ret': True, 'phy_resource': h_resource, 'usage': u_resource} diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 344c2d2b5c39..8d3ac315d2b9 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -268,7 +268,8 @@ class ComputeTestCase(test.TestCase): """ def dic_key_check(dic): - validkey = ['vcpus', 'memory_mb', 'local_gb', + validkey = ['vcpus', 'memory_mb', 'local_gb', + 'vcpus_used', 'memory_mb_used', 'local_gb_used', 'hypervisor_type', 'hypervisor_version', 'cpu_info'] return (list(set(validkey)) == list(set(dic.keys()))) @@ -286,13 +287,55 @@ class ComputeTestCase(test.TestCase): self.compute.db = dbmock self.mox.ReplayAll() + self.compute.update_service('dummy', host, binary) + self.mox.ResetAll() + + def test_update_available_resource_exception(self): + """a testcase of update_available_resource raises exception""" + host = 'foo' + binary = 'nova-compute' + ctxt = context.get_admin_context() + dbmock = self.mox.CreateMock(db) + dbmock.service_get_by_args(mox.IgnoreArg(), + mox.StrContains(host), + mox.StrContains(binary)).\ + AndRaise(exception.NotFound()) + self.compute.db = dbmock + self.compute.host = host + self.mox.ReplayAll() try: - self.compute.update_service('dummy', host, binary) + self.compute.update_available_resource(ctxt) except exception.Invalid, e: - msg = 'Cannot insert compute manager specific info' + msg = 'Cannot update resource info.' 
c1 = (0 <= e.message.find(msg))
+            self.assertTrue(c1)
+        self.mox.UnsetStubs()
+
+    def test_update_available_resource_success(self):
+        """A testcase where update_available_resource finishes with no errors."""
+
+        def dic_key_check(dic):
+            validkey = ['vcpus_used', 'memory_mb_used', 'local_gb_used']
+            return (list(set(validkey)) == list(set(dic.keys())))
+
+        host = 'foo'
+        binary = 'nova-compute'
+        ctxt = context.get_admin_context()
+        service_ref = {'id':1, 'binary':'nova-compute', 'topic':'compute'}
+        dbmock = self.mox.CreateMock(db)
+        dbmock.service_get_by_args(mox.IgnoreArg(),
+                                   mox.StrContains(host),
+                                   mox.StrContains(binary)).\
+                                   AndReturn(service_ref)
+        dbmock.service_update(mox.IgnoreArg(),
+                              service_ref['id'],
+                              mox.Func(dic_key_check))
+
+        self.compute.db = dbmock
+        self.compute.host = host
+        self.mox.ReplayAll()
+        self.compute.update_available_resource(ctxt)
+        self.mox.UnsetStubs()
 
     def _setup_other_managers(self):
         self.volume_manager = utils.import_object(FLAGS.volume_manager)
@@ -444,7 +487,7 @@ class ComputeTestCase(test.TestCase):
         rpc.call(c, db.queue_get_for(c, FLAGS.compute_topic, dest),
                  {"method": "pre_live_migration",
                   "args": {'instance_id': i_id}}).\
-                 InAnyOrder('g1').AndRaise(rpc.RemoteError('du', 'mm', 'y'))
+                 InAnyOrder('g1').AndRaise(rpc.RemoteError('', '', ''))
         self.mox.StubOutWithMock(compute_manager.LOG, 'error')
         compute_manager.LOG.error('Pre live migration for %s failed at %s',
                                   instance_ref['hostname'], dest)
@@ -480,7 +523,7 @@ class ComputeTestCase(test.TestCase):
         rpc.call(c, compute_topic,
                  {"method": "pre_live_migration",
                   "args": {'instance_id': i_id}}).\
-                 AndRaise(rpc.RemoteError('du', 'mm', 'y'))
+                 AndRaise(rpc.RemoteError('', '', ''))
         self.mox.StubOutWithMock(compute_manager.LOG, 'error')
         compute_manager.LOG.error('Pre live migration for %s failed at %s',
                                   instance_ref['hostname'], dest)
diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index c62bca9b1238..36d99d666b70 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -41,6 +41,7 @@ from nova.db.sqlalchemy import models
 FLAGS = flags.FLAGS
 flags.DECLARE('max_cores', 'nova.scheduler.simple')
 flags.DECLARE('stub_network', 'nova.compute.manager')
+flags.DECLARE('instances_path', 'nova.compute.manager')
 
 
 class TestDriver(driver.Scheduler):
@@ -111,7 +112,8 @@ class SchedulerTestCase(test.TestCase):
         scheduler = manager.SchedulerManager()
         dest = 'dummydest'
         ctxt = context.get_admin_context()
-        r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100}
+        r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100,
+              'vcpus_used':16, 'memory_mb_used':32, 'local_gb_used':10}
         service_ref = {'id':1, 'host':dest}
         service_ref.update(r0)
 
@@ -140,7 +142,8 @@ class SchedulerTestCase(test.TestCase):
         scheduler = manager.SchedulerManager()
         dest = 'dummydest'
         ctxt = context.get_admin_context()
-        r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100}
+        r0 = {'vcpus':16, 'memory_mb':32, 'local_gb':100,
+              'vcpus_used':16, 'memory_mb_used':32, 'local_gb_used':10}
         r1 = {'vcpus':10, 'memory_mb':4, 'local_gb':20}
         r2 = {'vcpus':10, 'memory_mb':20, 'local_gb':30}
         service_ref = {'id':1, 'host':dest}
@@ -148,7 +151,7 @@ class SchedulerTestCase(test.TestCase):
         instance_ref2 = {'id':2, 'project_id':'p-01', 'host':'dummy'}
         instance_ref2.update(r1)
         instance_ref3 = {'id':3, 'project_id':'p-02', 'host':'dummy'}
-        instance_ref3.update(r1)
+        instance_ref3.update(r2)
 
         self.mox.StubOutWithMock(manager, 'db', use_mock_anything=True)
         manager.db.service_get_all_compute_sorted(mox.IgnoreArg()).\
@@ -176,6 
+179,7 @@ class SchedulerTestCase(test.TestCase): self.assertTrue( c1 and c2 and c3 and c4 and c5 and c6) self.mox.UnsetStubs() + class ZoneSchedulerTestCase(test.TestCase): """Test case for zone scheduler""" def setUp(self): @@ -495,7 +499,7 @@ class SimpleDriverTestCase(test.TestCase): ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-00000001', 'host':'dummy', - 'volumes':[{'id':1}, {'id':2}]} + 'volumes':[{'id':1}, {'id':2}]} dest = 'dummydest' self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) @@ -793,7 +797,10 @@ class SimpleDriverTestCase(test.TestCase): ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + driver_i = self.scheduler.driver + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([]) @@ -813,6 +820,7 @@ class SimpleDriverTestCase(test.TestCase): Original host(an instance launched on) does not exist. """ dest = 'dummydest' + driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'launched_on':'h1'} @@ -821,6 +829,8 @@ class SimpleDriverTestCase(test.TestCase): service_ref.__setitem__('topic', 'compute') service_ref.__setitem__('host', i_ref['host']) + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) @@ -844,6 +854,7 @@ class SimpleDriverTestCase(test.TestCase): Original host and dest host has different hypervisor type. """ dest = 'dummydest' + driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', @@ -856,6 +867,8 @@ class SimpleDriverTestCase(test.TestCase): service_ref2.__setitem__('id', 2) service_ref2.__setitem__('hypervisor_type', 'xen') + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) @@ -879,6 +892,7 @@ class SimpleDriverTestCase(test.TestCase): Original host and dest host has different hypervisor version. """ dest = 'dummydest' + driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', @@ -891,6 +905,8 @@ class SimpleDriverTestCase(test.TestCase): service_ref2.__setitem__('id', 2) service_ref2.__setitem__('hypervisor_version', 12001) + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) @@ -914,6 +930,7 @@ class SimpleDriverTestCase(test.TestCase): Original host and dest host has different hypervisor version. 
""" dest = 'dummydest' + driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', @@ -927,6 +944,8 @@ class SimpleDriverTestCase(test.TestCase): service_ref2.__setitem__('hypervisor_version', 12000) service_ref2.__setitem__('cpuinfo', 'info') + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) @@ -956,6 +975,7 @@ class SimpleDriverTestCase(test.TestCase): The testcase make sure everything finished with no error. """ dest = 'dummydest' + driver_i = self.scheduler.driver ctxt = context.get_admin_context() topic = FLAGS.compute_topic i_ref = {'id':1, 'hostname':'i-01', @@ -969,6 +989,8 @@ class SimpleDriverTestCase(test.TestCase): service_ref2.__setitem__('hypervisor_version', 12000) service_ref2.__setitem__('cpuinfo', 'info') + self.mox.StubOutWithMock(driver_i, 'mounted_on_same_shared_storage') + driver_i.mounted_on_same_shared_storage(mox.IgnoreArg(), i_ref, dest) self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) @@ -988,36 +1010,6 @@ class SimpleDriverTestCase(test.TestCase): self.assertTrue(ret == None) self.mox.UnsetStubs() - def test_has_enough_resource_lack_resource_vcpu(self): - """ - A testcase of driver.has_enough_resource. - Lack of vcpu.(boundary check) - """ - dest = 'dummydest' - ctxt = context.get_admin_context() - topic = FLAGS.compute_topic - service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} - i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', - 'vcpus':6, 'memory_mb':8, 'local_gb':10} - i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - - self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) - driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([service_ref]) - driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([i_ref2, i_ref3]) - - self.mox.ReplayAll() - try: - self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) - except exception.NotEmpty, e: - msg = 'is not capable to migrate' - self.assertTrue(e.message.find(msg) >= 0) - self.mox.UnsetStubs() - def test_has_enough_resource_lack_resource_memory(self): """ A testcase of driver.has_enough_resource. 
@@ -1026,60 +1018,23 @@ class SimpleDriverTestCase(test.TestCase): dest = 'dummydest' ctxt = context.get_admin_context() topic = FLAGS.compute_topic - service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} - i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':16, 'local_gb':10} - i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} + service_ref = {'id':1, 'memory_mb':32, 'memory_mb_used':12, 'local_gb':100} + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', + 'vcpus':5, 'memory_mb':20, 'local_gb':10} self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) - driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([i_ref2, i_ref3]) self.mox.ReplayAll() try: - self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + self.scheduler.driver.has_enough_resource(ctxt, i_ref, dest) except exception.NotEmpty, e: msg = 'is not capable to migrate' self.assertTrue(e.message.find(msg) >= 0) self.mox.UnsetStubs() self.mox.UnsetStubs() - def test_has_enough_resource_lack_resource_disk(self): - """ - A testcase of driver.has_enough_resource. - Lack of local_gb.(boundary check) - """ - scheduler = manager.SchedulerManager() - dest = 'dummydest' - ctxt = context.get_admin_context() - topic = FLAGS.compute_topic - service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} - i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':80} - i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - - self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) - driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([service_ref]) - driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([i_ref2, i_ref3]) - - self.mox.ReplayAll() - try: - self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) - except exception.NotEmpty, e: - msg = 'is not capable to migrate' - self.assertTrue(e.message.find(msg) >= 0) - self.mox.UnsetStubs() - def test_has_enough_resource_works_correctly(self): """ A testcase of driver.has_enough_resource @@ -1088,21 +1043,101 @@ class SimpleDriverTestCase(test.TestCase): dest = 'dummydest' ctxt = context.get_admin_context() topic = FLAGS.compute_topic - service_ref = {'id':1, 'vcpus':16, 'memory_mb':32, 'local_gb':100} - i_ref1 = {'id':1, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - i_ref2 = {'id':2, 'hostname':'i-01', 'host':'dummy', - 'vcpus':5, 'memory_mb':8, 'local_gb':10} - i_ref3 = {'id':3, 'hostname':'i-02', 'host':'dummy', + service_ref = {'id':1, 'memory_mb':120, 'memory_mb_used':32} + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy', 'vcpus':5, 'memory_mb':8, 'local_gb':10} self.mox.StubOutWithMock(driver, 'db', use_mock_anything=True) driver.db.service_get_all_by_host(mox.IgnoreArg(), dest).\ AndReturn([service_ref]) - driver.db.instance_get_all_by_host(mox.IgnoreArg(), dest).\ - AndReturn([i_ref2, i_ref3]) self.mox.ReplayAll() - ret = self.scheduler.driver.has_enough_resource(ctxt, i_ref1, dest) + ret = self.scheduler.driver.has_enough_resource(ctxt, i_ref, dest) + self.assertTrue(ret == None) + self.mox.UnsetStubs() + + def 
test_mounted_on_same_shared_storage_cannot_make_tmpfile(self):
+        """
+        A testcase of driver.mounted_on_same_shared_storage
+        checks log message when dest host cannot make tmpfile.
+        """
+        dest = 'dummydest'
+        driver_i = self.scheduler.driver
+        ctxt = context.get_admin_context()
+        topic = FLAGS.compute_topic
+        fpath = '/test/20110127120000'
+        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+
+        self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+        driver.rpc.call(mox.IgnoreArg(),
+            db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
+            {"method": 'mktmpfile'}).AndRaise(rpc.RemoteError('', '', ''))
+        self.mox.StubOutWithMock(driver.logging, 'error')
+        msg = _("Cannot create tmpfile at %s to confirm shared storage.")
+        driver.logging.error(msg % FLAGS.instances_path)
+
+        self.mox.ReplayAll()
+        self.assertRaises(rpc.RemoteError,
+                          driver_i.mounted_on_same_shared_storage,
+                          ctxt, i_ref, dest)
+        self.mox.UnsetStubs()
+
+    def test_mounted_on_same_shared_storage_cannot_confirm_tmpfile(self):
+        """
+        A testcase of driver.mounted_on_same_shared_storage
+        checks log message when src host cannot confirm tmpfile.
+        """
+        dest = 'dummydest'
+        driver_i = self.scheduler.driver
+        ctxt = context.get_admin_context()
+        topic = FLAGS.compute_topic
+        fpath = '/test/20110127120000'
+        i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'}
+
+        self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True)
+        driver.rpc.call(mox.IgnoreArg(),
+            db.queue_get_for(ctxt, FLAGS.compute_topic, dest),
+            {"method": 'mktmpfile'}).AndReturn(fpath)
+        driver.rpc.call(mox.IgnoreArg(),
+            db.queue_get_for(ctxt, FLAGS.compute_topic, i_ref['host']),
+            {"method": 'exists', "args":{'path':fpath}}).\
+            AndRaise(rpc.RemoteError('','',''))
+        self.mox.StubOutWithMock(driver.logging, 'error')
+        msg = _("Cannot create tmpfile at %s to confirm shared storage.")
+        driver.logging.error(msg % FLAGS.instances_path)
+
+        self.mox.ReplayAll()
+        self.assertRaises(rpc.RemoteError,
+                          driver_i.mounted_on_same_shared_storage,
+                          ctxt, i_ref, dest)
+        self.mox.UnsetStubs()
+
+    def test_mounted_on_same_shared_storage_works_correctly(self):
+        """
+        A testcase of driver.mounted_on_same_shared_storage
+        to make sure everything finished with no error.
+ """ + dest = 'dummydest' + ctxt = context.get_admin_context() + topic = FLAGS.compute_topic + fpath = '/test/20110127120000' + i_ref = {'id':1, 'hostname':'i-01', 'host':'dummy'} + + self.mox.StubOutWithMock(driver, 'rpc', use_mock_anything=True) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest), + {"method": 'mktmpfile'}).AndReturn(fpath) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, i_ref['host']), + {"method": 'exists', "args":{'path':fpath}}) + driver.rpc.call(mox.IgnoreArg(), + db.queue_get_for(mox.IgnoreArg(), FLAGS.compute_topic, dest), + {"method": 'remove', "args":{'path':fpath}}) + + self.mox.ReplayAll() + ret = self.scheduler.driver.mounted_on_same_shared_storage(ctxt, + i_ref, + dest) self.assertTrue(ret == None) self.mox.UnsetStubs() diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 177e8f02192e..2828baced68f 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -241,11 +241,11 @@ class LibvirtConnTestCase(test.TestCase): uri = conn.get_uri() self.assertEquals(uri, testuri) - def test_get_memory_mb(self): + def test_get_vcpu_total(self): """ - Check if get_memory_mb returns memory value + Check if get_vcpu_total returns appropriate cpu value Connection/OS/driver differenct does not matter for this method, - so everyone can execute for checking. + everyone can execute for checking. """ try: self._driver_dependent_test_setup() @@ -254,9 +254,87 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) - self.assertTrue(0 < conn.get_memory_mb()) + self.assertTrue(0 < conn.get_vcpu_total()) self.mox.UnsetStubs() + + def test_get_memory_mb_total(self): + """Check if get_memory_mb returns appropriate memory value""" + try: + self._driver_dependent_test_setup() + except: + return + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + self.assertTrue(0 < conn.get_memory_mb_total()) + self.mox.UnsetStubs() + + def test_get_local_gb_total(self): + """Check if get_local_gb_total returns appropriate disk value""" + # Note(masumotok): cannot test b/c FLAGS.instances_path is + # inevitable for this test.. 
+        #try:
+        #    self._driver_dependent_test_setup()
+        #except:
+        #    return
+        #
+        #self.mox.ReplayAll()
+        #conn = libvirt_conn.LibvirtConnection(False)
+        #self.assertTrue(0 < conn.get_local_gb_total())
+        #self.mox.UnsetStubs()
+        pass
+
+    def test_get_vcpu_used(self):
+        """Check if get_vcpu_used returns appropriate vcpu value"""
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn',
+                                 use_mock_anything=True)
+        libvirt_conn.LibvirtConnection._conn.listDomainsID().AndReturn([1, 2])
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, "vcpus", use_mock_anything=True)
+        vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]])
+        vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]])
+        libvirt_conn.LibvirtConnection._conn.lookupByID(mox.IgnoreArg()).\
+            AndReturn(vdmock)
+        libvirt_conn.LibvirtConnection._conn.lookupByID(mox.IgnoreArg()).\
+            AndReturn(vdmock)
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        self.assertTrue(conn.get_vcpu_used() == 4)
+        self.mox.UnsetStubs()
+
+    def test_get_memory_mb_used(self):
+        """Check if get_memory_mb_used returns appropriate memory value"""
+        try:
+            self._driver_dependent_test_setup()
+        except:
+            return
+
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        self.assertTrue(0 < conn.get_memory_mb_used())
+        self.mox.UnsetStubs()
+
+    def test_get_local_gb_used(self):
+        """Check if get_local_gb_used returns appropriate disk value"""
+        # Note(masumotok): cannot test b/c FLAGS.instances_path is
+        # required for this test..
+        #try:
+        #    self._driver_dependent_test_setup()
+        #except:
+        #    return
+
+        #self.mox.ReplayAll()
+        #conn = libvirt_conn.LibvirtConnection(False)
+        #self.assertTrue(0 < conn.get_local_gb_used())
+        #self.mox.UnsetStubs()
+        pass
+
     def test_get_cpu_info_works_correctly(self):
         """
         Check if get_cpu_info works correctly.
diff --git a/nova/utils.py b/nova/utils.py
index 6d3ddd092a2c..e8919d7a56f5 100644
--- a/nova/utils.py
+++ b/nova/utils.py
@@ -303,6 +303,24 @@ def str_dict_replace(s, mapping):
     return s
 
 
+def mktmpfile(dir):
+    """create tmpfile under dir, and return filename."""
+    filename = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S')
+    fpath = os.path.join(dir, filename)
+    open(fpath, 'a+').write(fpath + '\n')
+    return fpath
+
+
+def exists(filename):
+    """check file path existence."""
+    return os.path.exists(filename)
+
+
+def remove(filename):
+    """remove file."""
+    return os.remove(filename)
+
+
 class LazyPluggable(object):
     """A pluggable backend loaded lazily based on some value."""
 
diff --git a/nova/virt/fake.py b/nova/virt/fake.py
index f469af681a66..4bf477f5be27 100644
--- a/nova/virt/fake.py
+++ b/nova/virt/fake.py
@@ -314,16 +314,28 @@ class FakeConnection(object):
         """This method is supported only libvirt. """
         return
 
-    def get_vcpu_number(self):
+    def get_vcpu_total(self):
         """This method is supported only libvirt. """
         return
 
-    def get_memory_mb(self):
-        """This method is supported only libvirt.."""
+    def get_memory_mb_total(self):
+        """This method is supported only libvirt. """
         return
 
-    def get_local_gb(self):
-        """This method is supported only libvirt.."""
+    def get_local_gb_total(self):
+        """This method is supported only libvirt. """
+        return
+
+    def get_vcpu_used(self):
+        """This method is supported only libvirt. """
+        return
+
+    def get_memory_mb_used(self):
+        """This method is supported only libvirt. """
""" + return + + def get_local_gb_used(self): + """This method is supported only libvirt. """ return def get_hypervisor_type(self): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 49dd03c57c8e..aefa32dcb41b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -835,21 +835,44 @@ class LibvirtConnection(object): return interfaces - def get_vcpu_number(self): + def get_vcpu_total(self): """ Get vcpu number of physical computer. """ - return self._conn.getMaxVcpus(None) + return open('/proc/cpuinfo').read().count('processor') - def get_memory_mb(self): - """Get the memory size of physical computer .""" + def get_memory_mb_total(self): + """Get the total memory size(MB) of physical computer .""" meminfo = open('/proc/meminfo').read().split() idx = meminfo.index('MemTotal:') # transforming kb to mb. return int(meminfo[idx + 1]) / 1024 - def get_local_gb(self): - """Get the hdd size of physical computer .""" + def get_local_gb_total(self): + """Get the total hdd size(GB) of physical computer .""" hddinfo = os.statvfs(FLAGS.instances_path) - return hddinfo.f_bsize * hddinfo.f_blocks / 1024 / 1024 / 1024 + return hddinfo.f_frsize * hddinfo.f_blocks / 1024 / 1024 / 1024 + + def get_vcpu_used(self): + """ Get vcpu available number of physical computer. """ + total = 0 + for i in self._conn.listDomainsID(): + dom = self._conn.lookupByID(i) + total += len(dom.vcpus()[1]) + return total + + def get_memory_mb_used(self): + """Get the free memory size(MB) of physical computer.""" + m = open('/proc/meminfo').read().split() + idx1 = m.index('MemFree:') + idx2 = m.index('Buffers:') + idx3 = m.index('Cached:') + avail = (int(m[idx1+1]) + int(m[idx2+1]) + int(m[idx3+1])) / 1024 + return self.get_memory_mb_total() - avail + + def get_local_gb_used(self): + """Get the free hdd size(GB) of physical computer .""" + hddinfo = os.statvfs(FLAGS.instances_path) + avail = hddinfo.f_frsize * hddinfo.f_bavail / 1024 / 1024 / 1024 + return self.get_local_gb_total() - avail def get_hypervisor_type(self): """ Get hypervisor type """ diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index 1e7933f51455..902879d098cb 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -213,16 +213,28 @@ class XenAPIConnection(object): """This method is supported only libvirt. """ return - def get_vcpu_number(self): + def get_vcpu_total(self): """This method is supported only libvirt. """ return - def get_memory_mb(self): - """This method is supported only libvirt..""" + def get_memory_mb_total(self): + """This method is supported only libvirt. """ return - def get_local_gb(self): - """This method is supported only libvirt..""" + def get_local_gb_total(self): + """This method is supported only libvirt. """ + return + + def get_vcpu_used(self): + """This method is supported only libvirt. """ + return + + def get_memory_mb_used(self): + """This method is supported only libvirt. """ + return + + def get_local_gb_used(self): + """This method is supported only libvirt. """ return def get_hypervisor_type(self): From d88d74c9a0a28e0ebd6cedf694753b9ee9decdac Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 18 Feb 2011 14:15:04 +0900 Subject: [PATCH 03/76] fixed based on reviewer's comment. 1. erase wrapper function(remove/exists/mktempfile) from nova.utils. 2. nova-manage service describeresource(->describe_resource) 3. nova-manage service updateresource(->update_resource) 4. erase "my mistake print" statement Additional changes are made at: 1. nova.image.s3.show 2. 
nova.compute.api.create

That's because instances cannot be launched without these changes.
---
 bin/nova-manage                        | 10 +++++-----
 nova/compute/api.py                    |  4 ++--
 nova/compute/manager.py                | 15 ++++++++++-----
 .../migrate_repo/versions/002_bexar.py |  9 ---------
 nova/image/s3.py                       |  2 +-
 nova/scheduler/manager.py              |  1 -
 nova/utils.py                          | 18 ------------------
 nova/virt/disk.py                      |  1 -
 nova/virt/libvirt_conn.py              |  2 +-
 9 files changed, 19 insertions(+), 43 deletions(-)

diff --git a/bin/nova-manage b/bin/nova-manage
index 7336a582b827..0bfe0d969007 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -574,7 +574,7 @@ class ServiceCommands(object):
             return
         db.service_update(ctxt, svc['id'], {'disabled': True})
 
-    def describeresource(self, host):
+    def describe_resource(self, host):
         """describe cpu/memory/hdd info for host."""
 
         result = rpc.call(context.get_admin_context(),
@@ -606,7 +606,7 @@ class ServiceCommands(object):
                    val['memory_mb'],
                    val['local_gb'])
 
-    def updateresource(self, host):
+    def update_resource(self, host):
         """update available vcpu/memory/disk info for host."""
 
         ctxt = context.get_admin_context()
@@ -618,9 +618,9 @@ class ServiceCommands(object):
         if len(service_refs) <= 0:
             raise exception.Invalid(_('%s is not compute node.') % host)
 
-        result = rpc.call(ctxt,
-                          db.queue_get_for(ctxt, FLAGS.compute_topic, host),
-                          {"method": "update_available_resource"})
+        rpc.call(ctxt,
+                 db.queue_get_for(ctxt, FLAGS.compute_topic, host),
+                 {"method": "update_available_resource"})
 
 
 class LogCommands(object):
diff --git a/nova/compute/api.py b/nova/compute/api.py
index ac02dbcfa882..740dd393567d 100644
--- a/nova/compute/api.py
+++ b/nova/compute/api.py
@@ -103,9 +103,9 @@ class API(base.Base):
         if not is_vpn:
             image = self.image_service.show(context, image_id)
             if kernel_id is None:
-                kernel_id = image.get('kernelId', None)
+                kernel_id = image.get('kernel_id', None)
             if ramdisk_id is None:
-                ramdisk_id = image.get('ramdiskId', None)
+                ramdisk_id = image.get('ramdisk_id', None)
         # No kernel and ramdisk for raw images
         if kernel_id == str(FLAGS.null_kernel):
             kernel_id = None
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index cae95dd93fbe..47dd5fd5eb86 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -38,6 +38,8 @@ import datetime
 import random
 import string
 import socket
+import os
+import tempfile
 import time
 import functools
 
@@ -577,14 +579,17 @@ class ComputeManager(manager.Manager):
     @exception.wrap_exception
     def mktmpfile(self, context):
         """make tmpfile under FLAGS.instance_path."""
-        return utils.mktmpfile(FLAGS.instances_path)
+        fd, name = tempfile.mkstemp(dir=FLAGS.instances_path)
+        # No essential reason to write dateinfo. just for debugging reason.
+        os.fdopen(fd, 'w').write(str(datetime.datetime.utcnow()))
+        return name
 
     @exception.wrap_exception
     def confirm_tmpfile(self, context, path):
         """Confirm existence of the tmpfile given by path."""
-        if not utils.exists(path):
+        if not os.path.exists(path):
             raise exception.NotFound(_('%s not found') % path)
-        return utils.remove(path)
+        return os.remove(path)
 
     @exception.wrap_exception
     def update_available_resource(self, context):
@@ -683,7 +688,7 @@ class ComputeManager(manager.Manager):
         Post operations for live migration.
         Mainly, database updating.
         """
-        LOG.info('post_live_migration() is started..')
+        LOG.info(_('post_live_migration() is started..'))
         instance_id = instance_ref['id']
 
         # Detaching volumes.
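Note: mktmpfile() and confirm_tmpfile() above are the two halves of the shared-storage handshake: the scheduler asks the destination host to drop a marker file under FLAGS.instances_path, then asks the source host to find and delete it. A condensed local sketch of both sides, with IOError standing in for nova's exception.NotFound:

    import os
    import tempfile

    def mktmpfile(dirpath):
        # Destination side: create a marker under the (possibly shared) dir.
        fd, name = tempfile.mkstemp(dir=dirpath)
        os.close(fd)
        return name

    def confirm_tmpfile(path):
        # Source side: the marker is visible only when both hosts mount
        # the same storage; remove it once its existence is confirmed.
        if not os.path.exists(path):
            raise IOError('%s not found' % path)
        os.remove(path)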
@@ -705,7 +710,7 @@ class ComputeManager(manager.Manager): # Not return if fixed_ip is not found, otherwise, # instance never be accessible.. if None == fixed_ip: - logging.warn('fixed_ip is not found for %s ' % i_name) + LOG.warn(_('fixed_ip is not found for %s.') % i_name) self.db.fixed_ip_update(ctxt, fixed_ip, {'host': dest}) try: diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py index 38210db85d17..699b837f8b20 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -229,12 +229,3 @@ def upgrade(migrate_engine): networks.create_column(networks_cidr_v6) networks.create_column(networks_ra_server) services.create_column(services_availability_zone) - #services.create_column(services_vcpus) - #services.create_column(services_memory_mb) - #services.create_column(services_local_gb) - #services.create_column(services_vcpus_used) - #services.create_column(services_memory_mb_used) - #services.create_column(services_local_gb_used) - #services.create_column(services_hypervisor_type) - #services.create_column(services_hypervisor_version) - #services.create_column(services_cpu_info) diff --git a/nova/image/s3.py b/nova/image/s3.py index 71304cdd66f3..14135a1ee432 100644 --- a/nova/image/s3.py +++ b/nova/image/s3.py @@ -94,7 +94,7 @@ class S3ImageService(service.BaseImageService): if FLAGS.connection_type == 'fake': return {'imageId': 'bar'} result = self.index(context) - result = [i for i in result if i['imageId'] == image_id] + result = [i for i in result if i['id'] == image_id] if not result: raise exception.NotFound(_('Image %s could not be found') % image_id) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index af54c72bebcb..ea7ae7bd24d8 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -60,7 +60,6 @@ class SchedulerManager(manager.Manager): host = getattr(self.driver, driver_method)(elevated, *args, **kwargs) except AttributeError, e: - print 'manager.attrerr', e host = self.driver.schedule(elevated, topic, *args, **kwargs) rpc.cast(context, diff --git a/nova/utils.py b/nova/utils.py index 966dde667813..8d7ff1f641e2 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -337,24 +337,6 @@ def str_dict_replace(s, mapping): return s -def mktmpfile(dir): - """create tmpfile under dir, and return filename.""" - filename = datetime.datetime.utcnow().strftime('%Y%m%d%H%M%S') - fpath = os.path.join(dir, filename) - open(fpath, 'a+').write(fpath + '\n') - return fpath - - -def exists(filename): - """check file path existence.""" - return os.path.exists(filename) - - -def remove(filename): - """remove file.""" - return os.remove(filename) - - class LazyPluggable(object): """A pluggable backend loaded lazily based on some value.""" diff --git a/nova/virt/disk.py b/nova/virt/disk.py index ec4acc4528e8..c5565abfaf8c 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -112,7 +112,6 @@ def inject_data(image, key=None, net=None, partition=None, nbd=False): def _link_device(image, nbd): """Link image to device using loopback or nbd""" - print '_link_device:0:', nbd, '::', image if nbd: device = _allocate_device() utils.execute('sudo qemu-nbd -c %s %s' % (device, image)) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 9b7a9ddbebfa..579c4593e21c 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -977,7 +977,7 @@ class LibvirtConnection(object): """ Update compute manager 
resource info on Service table. This method is called when nova-coompute launches, and - whenever admin executes "nova-manage service updateresource". + whenever admin executes "nova-manage service update_resource". """ try: From 764f0a457e74c4498cbc9ea30a184e61f7932072 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Tue, 22 Feb 2011 13:18:21 +0900 Subject: [PATCH 04/76] just add 005_add_live_migration.py. --- .../versions/005_add_live_migration.py | 84 +++++++++++++++++++ 1 file changed, 84 insertions(+) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/005_add_live_migration.py diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_live_migration.py new file mode 100644 index 000000000000..903f7a646502 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/005_add_live_migration.py @@ -0,0 +1,84 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Tables +# + +compute_services = Table('compute_services', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('service_id', Integer(), nullable=False), + + Column('vcpus', Integer(), nullable=False), + Column('memory_mb', Integer(), nullable=False), + Column('local_gb', Integer(), nullable=False), + Column('vcpus_used', Integer(), nullable=False), + Column('memory_mb_used', Integer(), nullable=False), + Column('local_gb_used', Integer(), nullable=False), + Column('hypervisor_type', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('hypervisor_version', Integer(), nullable=False), + Column('cpu_info', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + ) + + +# +# Tables to alter +# +instances_launched_on = Column( + 'launched_on', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + compute_services.create() + except Exception: + logging.info(repr(compute_services)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[compute_services]) + raise + + instances.create_column(instances_launched_on) From c32e57999be09368b18f5a89315465e629ed4819 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Tue, 22 Feb 2011 23:55:03 +0900 Subject: [PATCH 05/76] Fixed based on reviewer's comment. 1. Change docstrings format 2. Fix comment grammer mistake, etc --- bin/nova-api | 2 - bin/nova-dhcpbridge | 1 - bin/nova-manage | 28 +- nova/compute/manager.py | 123 ++++--- nova/db/api.py | 12 +- nova/db/sqlalchemy/api.py | 27 +- .../versions/005_add_live_migration.py | 3 +- nova/db/sqlalchemy/models.py | 12 +- nova/scheduler/driver.py | 121 ++++--- nova/scheduler/manager.py | 15 +- nova/tests/test_compute.py | 75 ++--- nova/tests/test_scheduler.py | 141 +++----- nova/tests/test_service.py | 6 +- nova/tests/test_virt.py | 307 ++++++++---------- nova/virt/fake.py | 74 +---- nova/virt/libvirt_conn.py | 182 ++++++++--- nova/virt/xenapi_conn.py | 14 +- nova/volume/driver.py | 14 +- 18 files changed, 574 insertions(+), 583 deletions(-) diff --git a/bin/nova-api b/bin/nova-api index 59466a8c69b2..11176a0215d9 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -38,13 +38,11 @@ from nova import flags from nova import log as logging from nova import version from nova import wsgi -from nova import utils logging.basicConfig() LOG = logging.getLogger('nova.api') LOG.setLevel(logging.DEBUG) -utils.default_flagfile() FLAGS = flags.FLAGS API_ENDPOINTS = ['ec2', 'osapi'] diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index fb04a484e0ec..d38ba2543cf2 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -125,7 +125,6 @@ def main(): LOG.debug(msg) globals()[action + '_lease'](mac, ip, hostname, interface) else: - open('/tmp/aaa', 'w+').write('-- %s' % interface) print init_leases(interface) if __name__ == "__main__": diff --git a/bin/nova-manage b/bin/nova-manage index 696ce0cadd13..49246fcc8065 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -548,7 +548,12 @@ class InstanceCommands(object): """Class for mangaging VM instances.""" def live_migration(self, ec2_id, dest): - """Migrates a running instance to a new machine.""" + """Migrates a running instance to a new machine. + + :param ec2_id: instance id which comes from euca-describe-instance. + :param dest: destination host name. + + """ ctxt = context.get_admin_context() instance_id = ec2_id_to_id(ec2_id) @@ -569,9 +574,8 @@ class InstanceCommands(object): "dest": dest, "topic": FLAGS.compute_topic}}) - msg = 'Migration of %s initiated. ' % ec2_id - msg += 'Check its progress using euca-describe-instances.' - print msg + print _('Migration of %s initiated.' + 'Check its progress using euca-describe-instances.') % ec2_id class ServiceCommands(object): @@ -619,15 +623,17 @@ class ServiceCommands(object): db.service_update(ctxt, svc['id'], {'disabled': True}) def describe_resource(self, host): - """describe cpu/memory/hdd info for host.""" + """Describes cpu/memory/hdd info for host. + + :param host: hostname. + + """ result = rpc.call(context.get_admin_context(), FLAGS.scheduler_topic, - {"method": "show_host_resource", + {"method": "show_host_resources", "args": {"host": host}}) - # Checking result msg format is necessary, that will have done - # when this feture is included in API. 
if type(result) != dict:
             print 'Unexpected error occurs'
             print '[Result]', result
@@ -650,7 +656,11 @@ class ServiceCommands(object):
                    val['local_gb'])
 
     def update_resource(self, host):
-        """update available vcpu/memory/disk info for host."""
+        """Updates available vcpu/memory/disk info for host.
+
+        :param host: hostname.
+
+        """
 
         ctxt = context.get_admin_context()
         service_refs = db.service_get_all_by_host(ctxt, host)
diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d548cef6f0ed..5b6e9082ed93 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -36,10 +36,10 @@ terminating it.
 
 import base64
 import datetime
+import os
 import random
 import string
 import socket
-import os
 import tempfile
 import time
 import functools
@@ -65,8 +65,8 @@ flags.DEFINE_string('console_host', socket.gethostname(),
                     'Console proxy host to use to connect to instances on'
                     'this host.')
 flags.DEFINE_string('live_migration_retry_count', 30,
-                    ("""Retry count needed in live_migration."""
-                     """ sleep 1 sec for each count"""))
+                    ("Retry count needed in live_migration."
+                     " sleep 1 sec for each count"))
 
 LOG = logging.getLogger('nova.compute.manager')
 
@@ -602,31 +602,66 @@ class ComputeManager(manager.Manager):
 
     @exception.wrap_exception
     def compare_cpu(self, context, cpu_info):
-        """ Check the host cpu is compatible to a cpu given by xml."""
+        """Checks the host cpu is compatible to a cpu given by xml.
+
+        :param context: security context
+        :param cpu_info: json string obtained from virConnect.getCapabilities
+        :returns: See driver.compare_cpu
+
+        """
         return self.driver.compare_cpu(cpu_info)
 
     @exception.wrap_exception
     def mktmpfile(self, context):
-        """make tmpfile under FLAGS.instance_path."""
-        fd, name = tempfile.mkstemp(dir=FLAGS.instances_path)
-        # No essential reason to write dateinfo. just for debugging reason.
-        os.fdopen(fd, 'w').write(str(datetime.datetime.utcnow()))
+        """Makes tmpfile under FLAGS.instances_path.
+
+        This method enables compute nodes to recognize that they mount
+        the same shared storage. mktmpfile() and confirm_tmpfile() are
+        used as a pair.
+
+        :param context: security context
+        :returns: tmpfile name
+
+        """
+
+        dirpath = FLAGS.instances_path
+        fd, name = tempfile.mkstemp(dir=dirpath)
+        LOG.debug(_("Creating tmpfile %s to notify other compute "
+                    "nodes that they mount the same storage.") % name)
+        os.fdopen(fd, 'w+').close()
         return name
 
     @exception.wrap_exception
     def confirm_tmpfile(self, context, path):
-        """Confirm existence of the tmpfile given by path."""
+        """Confirms existence of the tmpfile given by path.
+
+        :param context: security context
+        :param path: confirm existence of this path
+        :returns: depends on os.remove()
+
+        """
+
         if not os.path.exists(path):
             raise exception.NotFound(_('%s not found') % path)
         return os.remove(path)
 
     @exception.wrap_exception
     def update_available_resource(self, context):
-        """See comments update_resource_info"""
+        """See comments on update_resource_info.
+
+        :param context: security context
+        :returns: See driver.update_available_resource()
+
+        """
+
         return self.driver.update_available_resource(context, self.host)
 
     def pre_live_migration(self, context, instance_id):
-        """Any preparation for live migration at dst host."""
+        """Preparations for live migration at dest host.
+
+        :param context: security context
+        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+
+        """
 
         # Getting instance info
         instance_ref = self.db.instance_get(context, instance_id)
@@ -635,7 +670,7 @@ class ComputeManager(manager.Manager):
         # Getting fixed ips
         fixed_ip = self.db.instance_get_fixed_address(context, instance_id)
         if not fixed_ip:
-            msg = _("%(instance_id)s(%(ec2_id)s) doesnt have fixed_ip")
+            msg = _("%(instance_id)s(%(ec2_id)s) doesn't have fixed_ip")
             raise exception.NotFound(msg % locals())
 
         # If any volume is mounted, prepare here.
@@ -645,8 +680,8 @@ class ComputeManager(manager.Manager):
             for v in instance_ref['volumes']:
                 self.volume_manager.setup_compute_volume(context, v['id'])
 
-        # Bridge settings
-        # call this method prior to ensure_filtering_rules_for_instance,
+        # Bridge settings.
+        # Call this method prior to ensure_filtering_rules_for_instance,
         # since bridge is not set up, ensure_filtering_rules_for_instance
         # fails.
         #
@@ -660,24 +695,29 @@ class ComputeManager(manager.Manager):
                 break
             except exception.ProcessExecutionError, e:
                 if i == max_retry - 1:
-                    raise e
+                    raise
                 else:
-                    LOG.warn(_("setup_compute_network() fail %(i)d th. "
-                               "Retry up to %(max_retry)d for %(ec2_id)s")
+                    LOG.warn(_("setup_compute_network() failed %(i)d. "
+                               "Retry up to %(max_retry)d for %(ec2_id)s.")
                                % locals())
                     time.sleep(1)
 
         # Creating filters to hypervisors and firewalls.
         # An example is that nova-instance-instance-xxx,
-        # which is written to libvirt.xml( check "virsh nwfilter-list )
-        # On destination host, this nwfilter is necessary.
+        # which is written to libvirt.xml (check "virsh nwfilter-list").
+        # This nwfilter is necessary on the destination host.
         # In addition, this method is creating filtering rule
         # onto destination host.
         self.driver.ensure_filtering_rules_for_instance(instance_ref)
 
-    #@exception.wrap_exception
     def live_migration(self, context, instance_id, dest):
-        """Executing live migration."""
+        """Executes live migration.
+
+        :param context: security context
+        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+        :param dest: destination host
+
+        """
 
         # Get instance for error handling.
         instance_ref = self.db.instance_get(context, instance_id)
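Note: the setup_compute_network() loop above is a plain bounded-retry idiom: up to FLAGS.live_migration_retry_count attempts, one second apart, with only the final failure re-raised. Factored out as a sketch (the helper is illustrative, not part of the series):

    import time

    def retry(func, max_retry, interval=1):
        # Retry transient failures; only the last one propagates.
        for i in range(max_retry):
            try:
                return func()
            except Exception:
                if i == max_retry - 1:
                    raise
                time.sleep(interval)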
- #if None == fixed_ip: - # LOG.warn(_('fixed_ip is not found for %s.') % i_name) - #self.db.fixed_ip_update(ctxt, fixed_ip, {'host': dest}) - try: # Not return if floating_ip is not found, otherwise, # instance never be accessible.. floating_ip = self.db.instance_get_floating_address(ctxt, instance_id) - if None == floating_ip: + if not floating_ip: LOG.info(_('floating_ip is not found for %s'), i_name) else: floating_ip_ref = self.db.floating_ip_get_by_address(ctxt, @@ -763,15 +803,23 @@ class ComputeManager(manager.Manager): # Restore instance/volume state self.recover_live_migration(ctxt, instance_ref, dest) - msg = _('Migrating %(i_name)s to %(dest)s finishes successfully.') - LOG.info(msg % locals()) + LOG.info(_('Migrating %(i_name)s to %(dest)s finishes successfully.') + % locals()) LOG.info(_("The below error is normally occurs." "Just check if instance is successfully migrated.\n" "libvir: QEMU error : Domain not found: no domain " "with matching name..")) def recover_live_migration(self, ctxt, instance_ref, host=None): - """Instance/volume state is recovered from migrating -> running.""" + """Recovers Instance/volume state from migrating -> running. + + :param ctxt: security context + :param instance_id: nova.db.sqlalchemy.models.Instance.Id + :param host: + DB column value is updated by this hostname. + if none, the host instance currently running is selected. + + """ if not host: host = instance_ref['host'] @@ -783,5 +831,4 @@ class ComputeManager(manager.Manager): 'host': host}) for v in instance_ref['volumes']: - self.db.volume_update(ctxt, v['id'], {'status': 'in-use', - 'host': host}) + self.db.volume_update(ctxt, v['id'], {'status': 'in-use'}) diff --git a/nova/db/api.py b/nova/db/api.py index 609f62495ea3..e10a0617810a 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -169,6 +169,7 @@ def compute_service_update(context, compute_id, values): Raises NotFound if computeService does not exist. 
""" + return IMPL.compute_service_update(context, compute_id, values) @@ -446,27 +447,22 @@ def instance_add_security_group(context, instance_id, security_group_id): security_group_id) -def instance_get_all_by_host(context, hostname): - """Get instances by host""" - return IMPL.instance_get_all_by_host(context, hostname) - - def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): - """Get instances.vcpus by host and project""" + """Get instances.vcpus by host and project.""" return IMPL.instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id) def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): - """Get amount of memory by host and project """ + """Get amount of memory by host and project.""" return IMPL.instance_get_memory_sum_by_host_and_project(context, hostname, proj_id) def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): - """Get total amount of disk by host and project """ + """Get total amount of disk by host and project.""" return IMPL.instance_get_disk_sum_by_host_and_project(context, hostname, proj_id) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 43d56cd8a3fa..b4f45a08982f 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -184,8 +184,8 @@ def service_get_all_compute_by_host(context, host): all() if not result: - msg = _('%s does not exist or not compute node') - raise exception.NotFound(msg % host) + raise exception.NotFound(_("%s does not exist or not " + "compute node.") % host) return result @@ -328,7 +328,7 @@ def compute_service_create(context, values): def compute_service_update(context, compute_id, values): session = get_session() with session.begin(): - compute_ref = service_get(context, compute_id, session=session) + compute_ref = compute_service_get(context, compute_id, session=session) compute_ref.update(values) compute_ref.save(session=session) @@ -964,21 +964,6 @@ def instance_add_security_group(context, instance_id, security_group_id): instance_ref.save(session=session) -@require_context -def instance_get_all_by_host(context, hostname): - session = get_session() - if not session: - session = get_session() - - result = session.query(models.Instance).\ - filter_by(host=hostname).\ - filter_by(deleted=can_read_deleted(context)).\ - all() - if not result: - return [] - return result - - @require_context def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): session = get_session() @@ -987,7 +972,7 @@ def instance_get_vcpu_sum_by_host_and_project(context, hostname, proj_id): filter_by(project_id=proj_id).\ filter_by(deleted=False).\ value(func.sum(models.Instance.vcpus)) - if None == result: + if not result: return 0 return result @@ -1000,7 +985,7 @@ def instance_get_memory_sum_by_host_and_project(context, hostname, proj_id): filter_by(project_id=proj_id).\ filter_by(deleted=False).\ value(func.sum(models.Instance.memory_mb)) - if None == result: + if not result: return 0 return result @@ -1013,7 +998,7 @@ def instance_get_disk_sum_by_host_and_project(context, hostname, proj_id): filter_by(project_id=proj_id).\ filter_by(deleted=False).\ value(func.sum(models.Instance.local_gb)) - if None == result: + if not result: return 0 return result diff --git a/nova/db/sqlalchemy/migrate_repo/versions/005_add_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/005_add_live_migration.py index 903f7a646502..2689b5b7485d 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/005_add_live_migration.py +++ 
b/nova/db/sqlalchemy/migrate_repo/versions/005_add_live_migration.py
@@ -16,10 +16,9 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
-from sqlalchemy import *
 from migrate import *
-
 from nova import log as logging
+from sqlalchemy import *
 
 
 meta = MetaData()
 
diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py
index 02d4e2f9b234..f2a029c203a7 100644
--- a/nova/db/sqlalchemy/models.py
+++ b/nova/db/sqlalchemy/models.py
@@ -137,12 +137,14 @@ class ComputeService(BASE, NovaBase):
 
     # Note(masumotok): Expected Strings example:
     #
-    # '{"arch":"x86_64", "model":"Nehalem",
-    #   "topology":{"sockets":1, "threads":2, "cores":3},
-    #    features:[ "tdtscp", "xtpr"]}'
+    # '{"arch":"x86_64",
+    #   "model":"Nehalem",
+    #   "topology":{"sockets":1, "threads":2, "cores":3},
+    #   "features":["tdtscp", "xtpr"]}'
     #
     # Points are "json translatable" and it must have all dictionary keys
-    # above, and tag of getCapabilities()(See libvirt.virtConnection).
+    # above, since it is copied from the <cpu> tag of getCapabilities()
+    # (See libvirt.virtConnection).
     cpu_info = Column(Text, nullable=True)
 
@@ -220,7 +222,7 @@ class Instance(BASE, NovaBase):
     display_description = Column(String(255))
 
     # To remember on which host an instance booted.
-    # An instance may moved to other host by live migraiton.
+    # An instance may have moved to another host by live migration.
     launched_on = Column(Text)
     locked = Column(Boolean)
diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py
index 41b87bbcae15..8c30702ba616 100644
--- a/nova/scheduler/driver.py
+++ b/nova/scheduler/driver.py
@@ -70,9 +70,18 @@ class Scheduler(object):
         raise NotImplementedError(_("Must implement a fallback schedule"))
 
     def schedule_live_migration(self, context, instance_id, dest):
-        """live migration method"""
+        """Live migration scheduling method.
 
-        # Whether instance exists and running
+        :param context: security context
+        :param instance_id: nova.db.sqlalchemy.models.Instance.Id
+        :param dest: destination host
+        :return:
+            The host where the instance is currently running.
+            The scheduler then sends a request to that host.
+
+        """
+
+        # Whether instance exists and is running.
         instance_ref = db.instance_get(context, instance_id)
 
         # Checking instance.
@@ -102,11 +111,16 @@ class Scheduler(object):
         return src
 
     def _live_migration_src_check(self, context, instance_ref):
-        """Live migration check routine (for src host)"""
+        """Live migration check routine (for src host).
+
+        :param context: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+
+        """
 
         # Checking instance is running.
-        if power_state.RUNNING != instance_ref['state'] or \
-           'running' != instance_ref['state_description']:
+        if (power_state.RUNNING != instance_ref['state'] or
+            'running' != instance_ref['state_description']):
             msg = _('Instance(%s) is not running')
             ec2_id = instance_ref['hostname']
             raise exception.Invalid(msg % ec2_id)
@@ -129,7 +143,13 @@ class Scheduler(object):
             raise exception.Invalid(msg % src)
 
     def _live_migration_dest_check(self, context, instance_ref, dest):
-        """Live migration check routine (for destination host)"""
+        """Live migration check routine (for destination host).
+
+        :param context: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+        :param dest: destination host
+
+        """
 
         # Checking dest exists and compute node.
        dservice_refs = db.service_get_all_compute_by_host(context, dest)
@@ -145,20 +165,25 @@ class Scheduler(object):
         src = instance_ref['host']
         if dest == src:
             ec2_id = instance_ref['hostname']
-            msg = _("""%(dest)s is where %(ec2_id)s is """
-                    """running now. choose other host.""") % locals()
-            raise exception.Invalid(msg)
+            raise exception.Invalid(_("%(dest)s is where %(ec2_id)s is "
+                                      "running now. Choose another host.")
+                                    % locals())
 
         # Checking dst host still has enough capacity.
-        self.has_enough_resource(context, instance_ref, dest)
+        self.has_enough_resources(context, instance_ref, dest)
 
     def _live_migration_common_check(self, context, instance_ref, dest):
-        """
-        Live migration check routine.
-        Below pre-checkings are followed by
+        """Live migration common check routine.
+
+        The checks below follow
         http://wiki.libvirt.org/page/TodoPreMigrationChecks
 
+        :param context: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+        :param dest: destination host
+
         """
+
         # Checking shared storage connectivity
         self.mounted_on_same_shared_storage(context, instance_ref, dest)
 
@@ -168,27 +193,27 @@ class Scheduler(object):
 
         # Checking original host (where instance was launched) exists.
         try:
-            oservice_refs = \
-                db.service_get_all_compute_by_host(context,
-                                           instance_ref['launched_on'])
+            oservice_refs = db.service_get_all_compute_by_host(context,
+                                           instance_ref['launched_on'])
         except exception.NotFound:
-            msg = _('%s(where instance was launched at) does not exists.')
-            raise exception.Invalid(msg % instance_ref['launched_on'])
+            raise exception.Invalid(_("host %s where instance was launched "
+                                      "does not exist.")
+                                    % instance_ref['launched_on'])
         oservice_ref = oservice_refs[0]['compute_service'][0]
 
         # Checking hypervisor is same.
         o = oservice_ref['hypervisor_type']
         d = dservice_ref['hypervisor_type']
         if o != d:
-            msg = _('Different hypervisor type(%(o)s->%(d)s)') % locals()
-            raise exception.Invalid(msg)
+            raise exception.Invalid(_("Different hypervisor type"
+                                      "(%(o)s->%(d)s)") % locals())
 
         # Checking hypervisor version.
         o = oservice_ref['hypervisor_version']
         d = dservice_ref['hypervisor_version']
         if o > d:
-            msg = _('Older hypervisor version(%(o)s->%(d)s)') % locals()
-            raise exception.Invalid(msg)
+            raise exception.Invalid(_('Older hypervisor version(%(o)s->%(d)s)')
+                                    % locals())
 
         # Checking cpuinfo.
         try:
             rpc.call(context,
                      db.queue_get_for(context, FLAGS.compute_topic, dest),
                      {"method": 'compare_cpu',
                       "args": {'cpu_info': oservice_ref['cpu_info']}})
 
         except rpc.RemoteError, e:
             ec2_id = instance_ref['hostname']
             src = instance_ref['host']
-            msg = _("""%(dest)s doesnt have compatibility to %(src)s"""
-                    """(where %(ec2_id)s was launched at)""")
-            logging.exception(msg % locals())
-            raise e
+            logging.exception(_("host %(dest)s is not compatible with "
+                                "original host %(src)s.") % locals())
+            raise
+
+    def has_enough_resources(self, context, instance_ref, dest):
+        """Checks if destination host has enough resources for live migration.
 
-    def has_enough_resource(self, context, instance_ref, dest):
-        """
-        Check if destination host has enough resource for live migration.
         Currently, only memory checking has been done. If storage
         migration (block migration, meaning live-migration without any shared
         storage) becomes available, local storage checking is also necessary.
+
+        :param context: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+        :param dest: destination host
+
+        """
+
         # Getting instance information
         ec2_id = instance_ref['hostname']
 
@@ -225,15 +255,23 @@ class Scheduler(object):
         mem_avail = mem_total - mem_used
         mem_inst = instance_ref['memory_mb']
         if mem_avail <= mem_inst:
-            msg = _("""%(ec2_id)s is not capable to migrate %(dest)s"""
-                    """(host:%(mem_avail)s <= instance:%(mem_inst)s)""")
-            raise exception.NotEmpty(msg % locals())
+            raise exception.NotEmpty(_("%(ec2_id)s is not capable to "
+                                       "migrate %(dest)s (host:%(mem_avail)s "
+                                       "<= instance:%(mem_inst)s)")
+                                     % locals())
 
     def mounted_on_same_shared_storage(self, context, instance_ref, dest):
+        """Check if the src and dest hosts mount the same shared storage.
+
+        First, the dest host creates a temp file, and the src host can see
+        it if they mount the same shared storage. Then the src host erases
+        it.
+
+        :param context: security context
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+        :param dest: destination host
+
         """
-        Check if /nova-inst-dir/insntances is mounted same storage at
-        live-migration src and dest host.
-        """
+
         src = instance_ref['host']
         dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
         src_t = db.queue_get_for(context, FLAGS.compute_topic, src)
 
@@ -243,8 +281,8 @@ class Scheduler(object):
             filename = rpc.call(context, dst_t, {"method": 'mktmpfile'})
         except rpc.RemoteError, e:
             msg = _("Cannot create tmpfile at %s to confirm shared storage.")
-            logging.error(msg % FLAGS.instance_path)
-            raise e
+            LOG.error(msg % FLAGS.instances_path)
+            raise
 
         # make sure existence at src host.
         try:
             rpc.call(context, src_t,
                      {"method": 'confirm_tmpfile',
                       "args": {'path': filename}})
         except (rpc.RemoteError, exception.NotFound), e:
-            ipath = FLAGS.instance_path
-            msg = _("""Cannot comfirm %(ipath)s at %(dest)s is located at"""
-                    """ same shared storage.""") % locals()
-            logging.error(msg)
-            raise e
+            ipath = FLAGS.instances_path
+            logging.error(_("Cannot confirm %(ipath)s at %(dest)s is "
+                            "located at same shared storage.") % locals())
+            raise
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index 584dc49d285a..783594c6f19d 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -69,10 +69,19 @@ class SchedulerManager(manager.Manager):
         LOG.debug(_("Casting to %(topic)s %(host)s for %(method)s") % locals())
 
     # NOTE (masumotok) : This method should be moved to nova.api.ec2.admin.
-    #                    Based on bear design summit discussion,
+    #                    Based on bexar design summit discussion,
     #                    just put this here for bexar release.
-    def show_host_resource(self, context, host, *args):
-        """show the physical/usage resource given by hosts."""
+    def show_host_resources(self, context, host, *args):
+        """Shows the physical/usage resource given by hosts.
+
+        :param context: security context
+        :param host: hostname
+        :returns:
+            example format is below.
+            {'resource':D, 'usage':{proj_id1:D, proj_id2:D}}
+            D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048}
+ {'resource':D, 'usage':{proj_id1:D, proj_id2:D}} + D: {'vcpus':3, 'memory_mb':2048, 'local_gb':2048} + + """ compute_ref = db.service_get_all_compute_by_host(context, host) compute_ref = compute_ref[0] diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 74cb82eeb686..3c88d186d081 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -314,10 +314,7 @@ class ComputeTestCase(test.TestCase): self.compute_driver = utils.import_object(FLAGS.compute_driver) def test_pre_live_migration_instance_has_no_fixed_ip(self): - """ - if instances that are intended to be migrated doesnt have fixed_ip - (not happens usually), pre_live_migration has to raise Exception. - """ + """Confirm raising exception if instance doesn't have fixed_ip.""" instance_ref = self._get_dummy_instance() c = context.get_admin_context() i_id = instance_ref['id'] @@ -331,14 +328,9 @@ class ComputeTestCase(test.TestCase): self.assertRaises(exception.NotFound, self.compute.pre_live_migration, c, instance_ref['id']) - self.mox.ResetAll() def test_pre_live_migration_instance_has_volume(self): - """if any volumes are attached to the instances that are - intended to be migrated, setup_compute_volume must be - called because aoe module should be inserted at destination - host. This testcase checks on it. - """ + """Confirm setup_compute_volume is called when volume is mounted.""" i_ref = self._get_dummy_instance() c = context.get_admin_context() @@ -364,14 +356,9 @@ class ComputeTestCase(test.TestCase): self.mox.ReplayAll() ret = self.compute.pre_live_migration(c, i_ref['id']) self.assertEqual(ret, None) - self.mox.ResetAll() def test_pre_live_migration_instance_has_no_volume(self): - """if any volumes are not attached to the instances that are - intended to be migrated, log message should be appears - because administrator can proove instance conditions before - live_migration if any trouble occurs. - """ + """Confirm log meg when instance doesn't mount any volumes.""" i_ref = self._get_dummy_instance() i_ref.__setitem__('volumes', []) c = context.get_admin_context() @@ -395,14 +382,14 @@ class ComputeTestCase(test.TestCase): self.mox.ReplayAll() ret = self.compute.pre_live_migration(c, i_ref['id']) self.assertEqual(ret, None) - self.mox.ResetAll() def test_pre_live_migration_setup_compute_node_fail(self): - """setup_compute_node sometimes fail since concurrent request - comes to iptables and iptables complains. Then this method - tries to retry, but raise exception in case of over - max_retry_count. this method confirms raising exception. + """Confirm operation setup_compute_network() fails. + + It retries and raise exception when timeout exceeded. + """ + i_ref = self._get_dummy_instance() c = context.get_admin_context() @@ -427,14 +414,9 @@ class ComputeTestCase(test.TestCase): self.assertRaises(exception.ProcessExecutionError, self.compute.pre_live_migration, c, i_ref['id']) - self.mox.ResetAll() - def test_live_migration_instance_has_volume(self): - """Any volumes are mounted by instances to be migrated are found, - vblade health must be checked before starting live-migration. - And that is checked by check_for_export(). - This testcase confirms check_for_export() is called. 
- """ + def test_live_migration_works_correctly_with_volume(self): + """Confirm check_for_export to confirm volume health check.""" i_ref = self._get_dummy_instance() c = context.get_admin_context() topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host']) @@ -457,15 +439,9 @@ class ComputeTestCase(test.TestCase): self.mox.ReplayAll() ret = self.compute.live_migration(c, i_ref['id'], i_ref['host']) self.assertEqual(ret, None) - self.mox.ResetAll() - def test_live_migration_instance_has_volume_and_exception(self): - """In addition to test_live_migration_instance_has_volume testcase, - this testcase confirms if any exception raises from - check_for_export(). Then, valid seaquence of this method should - recovering instance/volumes status(ex. instance['state_description'] - is changed from 'migrating' -> 'running', was changed by scheduler) - """ + def test_live_migration_dest_raises_exception(self): + """Confirm exception when pre_live_migration fails.""" i_ref = self._get_dummy_instance() c = context.get_admin_context() topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host']) @@ -484,20 +460,16 @@ class ComputeTestCase(test.TestCase): 'state': power_state.RUNNING, 'host': i_ref['host']}) for v in i_ref['volumes']: - dbmock.volume_update(c, v['id'], {'status': 'in-use', - 'host': i_ref['host']}) + dbmock.volume_update(c, v['id'], {'status': 'in-use'}) self.compute.db = dbmock self.mox.ReplayAll() self.assertRaises(rpc.RemoteError, self.compute.live_migration, c, i_ref['id'], i_ref['host']) - self.mox.ResetAll() - def test_live_migration_instance_has_no_volume_and_exception(self): - """Simpler than - test_live_migration_instance_has_volume_and_exception - """ + def test_live_migration_dest_raises_exception_no_volume(self): + """Same as above test(input pattern is different) """ i_ref = self._get_dummy_instance() i_ref.__setitem__('volumes', []) c = context.get_admin_context() @@ -520,10 +492,9 @@ class ComputeTestCase(test.TestCase): self.assertRaises(rpc.RemoteError, self.compute.live_migration, c, i_ref['id'], i_ref['host']) - self.mox.ResetAll() - def test_live_migration_instance_has_no_volume(self): - """Simpler than test_live_migration_instance_has_volume.""" + def test_live_migration_works_correctly_no_volume(self): + """Confirm live_migration() works as expected correctly.""" i_ref = self._get_dummy_instance() i_ref.__setitem__('volumes', []) c = context.get_admin_context() @@ -545,11 +516,9 @@ class ComputeTestCase(test.TestCase): self.mox.ReplayAll() ret = self.compute.live_migration(c, i_ref['id'], i_ref['host']) self.assertEqual(ret, None) - self.mox.ResetAll() def test_post_live_migration_working_correctly(self): - """post_live_migration works as expected correctly """ - + """Confirm post_live_migration() works as expected correctly.""" dest = 'desthost' flo_addr = '1.2.1.2' @@ -579,19 +548,15 @@ class ComputeTestCase(test.TestCase): # executing self.mox.ReplayAll() ret = self.compute.post_live_migration(c, i_ref, dest) - self.mox.UnsetStubs() # make sure every data is rewritten to dest i_ref = db.instance_get(c, i_ref['id']) c1 = (i_ref['host'] == dest) - v_ref = db.volume_get(c, v_ref['id']) - c2 = (v_ref['host'] == dest) - c3 = False flo_refs = db.floating_ip_get_all_by_host(c, dest) - c3 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr) + c2 = (len(flo_refs) != 0 and flo_refs[0]['address'] == flo_addr) # post operaton - self.assertTrue(c1 and c2 and c3) + self.assertTrue(c1 and c2) db.instance_destroy(c, instance_id) db.volume_destroy(c, v_ref['id']) 
db.floating_ip_destroy(c, flo_addr) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 729bcb580e99..301106848c7e 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -108,22 +108,21 @@ class SchedulerTestCase(test.TestCase): self.mox.ReplayAll() scheduler.named_method(ctxt, 'topic', num=7) - def test_show_host_resource_host_not_exit(self): - """ - A testcase of driver.has_enough_resource - given host does not exists. - """ + def test_show_host_resources_host_not_exit(self): + """A host given as an argument does not exists.""" + scheduler = manager.SchedulerManager() dest = 'dummydest' ctxt = context.get_admin_context() try: - scheduler.show_host_resource(ctxt, dest) + scheduler.show_host_resources(ctxt, dest) except exception.NotFound, e: c1 = (0 <= e.message.find('does not exist or not compute node')) self.assertTrue(c1) def _dic_is_equal(self, dic1, dic2, keys=None): + """Compares 2 dictionary contents(Helper method)""" if not keys: keys = ['vcpus', 'memory_mb', 'local_gb', 'vcpus_used', 'memory_mb_used', 'local_gb_used'] @@ -133,16 +132,14 @@ class SchedulerTestCase(test.TestCase): return False return True - def test_show_host_resource_no_project(self): - """ - A testcase of driver.show_host_resource - no instance stays on the given host - """ + def test_show_host_resources_no_project(self): + """No instance are running on the given host.""" + scheduler = manager.SchedulerManager() ctxt = context.get_admin_context() s_ref = self._create_compute_service() - result = scheduler.show_host_resource(ctxt, s_ref['host']) + result = scheduler.show_host_resources(ctxt, s_ref['host']) # result checking c1 = ('resource' in result and 'usage' in result) @@ -152,11 +149,9 @@ class SchedulerTestCase(test.TestCase): self.assertTrue(c1 and c2 and c3) db.service_destroy(ctxt, s_ref['id']) - def test_show_host_resource_works_correctly(self): - """ - A testcase of driver.show_host_resource - to make sure everything finished with no error. - """ + def test_show_host_resources_works_correctly(self): + """show_host_resources() works correctly as expected.""" + scheduler = manager.SchedulerManager() ctxt = context.get_admin_context() s_ref = self._create_compute_service() @@ -164,7 +159,7 @@ class SchedulerTestCase(test.TestCase): i_ref2 = self._create_instance(project_id='p-02', vcpus=3, host=s_ref['host']) - result = scheduler.show_host_resource(ctxt, s_ref['host']) + result = scheduler.show_host_resources(ctxt, s_ref['host']) c1 = ('resource' in result and 'usage' in result) compute_service = s_ref['compute_service'][0] @@ -284,6 +279,7 @@ class SimpleDriverTestCase(test.TestCase): return db.volume_create(self.context, vol)['id'] def _create_compute_service(self, **kwargs): + """Create a compute service.""" dic = {'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0, 'availability_zone': 'dummyzone'} @@ -698,13 +694,13 @@ class SimpleDriverTestCase(test.TestCase): volume1.kill() volume2.kill() - def test_scheduler_live_migraiton_with_volume(self): - """ - driver.scheduler_live_migration finishes successfully - (volumes are attached to instances) - This testcase make sure schedule_live_migration - changes instance state from 'running' -> 'migrating' + def test_scheduler_live_migration_with_volume(self): + """scheduler_live_migration() works correctly as expected. + + Also, checks instance state is changed from 'running' -> 'migrating'. 
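Note: these assertions pin down the show_host_resources() payload: {'resource': D, 'usage': {proj_id: D}}, where D carries vcpus/memory_mb/local_gb (plus the *_used counters on the resource side). A sketch of a client walking that payload, much as nova-manage describe_resource does (Python 2 print statements to match the series; 'result' is assumed to be the RPC return value):

    def print_host_resources(result):
        # Host-wide totals reported by the compute node.
        r = result['resource']
        print 'host: vcpus=%s mem=%sMB disk=%sGB' % (
            r['vcpus'], r['memory_mb'], r['local_gb'])
        # Per-project usage on that host.
        for proj_id, usage in result['usage'].items():
            print '%s: vcpus=%s mem=%sMB disk=%sGB' % (
                proj_id, usage['vcpus'], usage['memory_mb'],
                usage['local_gb'])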
+ """ + instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) dic = {'instance_id': instance_id, 'size': 1} @@ -737,11 +733,9 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.volume_destroy(self.context, v_ref['id']) - def test_live_migraiton_src_check_instance_not_running(self): - """ - A testcase of driver._live_migration_src_check. - The instance given by instance_id is not running. - """ + def test_live_migration_src_check_instance_not_running(self): + """The instance given by instance_id is not running.""" + instance_id = self._create_instance(state_description='migrating') i_ref = db.instance_get(self.context, instance_id) @@ -754,12 +748,9 @@ class SimpleDriverTestCase(test.TestCase): self.assertTrue(c) db.instance_destroy(self.context, instance_id) - def test_live_migraiton_src_check_volume_node_not_alive(self): - """ - A testcase of driver._live_migration_src_check. - Volume node is not alive if any volumes are attached to - the given instance. - """ + def test_live_migration_src_check_volume_node_not_alive(self): + """Raise exception when volume node is not alive.""" + instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) dic = {'instance_id': instance_id, 'size': 1} @@ -782,11 +773,8 @@ class SimpleDriverTestCase(test.TestCase): db.service_destroy(self.context, s_ref['id']) db.volume_destroy(self.context, v_ref['id']) - def test_live_migraiton_src_check_compute_node_not_alive(self): - """ - A testcase of driver._live_migration_src_check. - The testcase make sure src-compute node is alive. - """ + def test_live_migration_src_check_compute_node_not_alive(self): + """Confirms src-compute node is alive.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) t = datetime.datetime.utcnow() - datetime.timedelta(10) @@ -803,11 +791,8 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_src_check_works_correctly(self): - """ - A testcase of driver._live_migration_src_check. - The testcase make sure everything finished with no error. - """ + def test_live_migration_src_check_works_correctly(self): + """Confirms this method finishes with no error.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) s_ref = self._create_compute_service(host=i_ref['host']) @@ -819,11 +804,8 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_dest_check_not_alive(self): - """ - A testcase of driver._live_migration_dst_check. - Destination host does not exist. - """ + def test_live_migration_dest_check_not_alive(self): + """Confirms exception raises in case dest host does not exist.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) t = datetime.datetime.utcnow() - datetime.timedelta(10) @@ -841,11 +823,8 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_dest_check_service_same_host(self): - """ - A testcase of driver._live_migration_dst_check. - Destination host is same as src host. 
- """ + def test_live_migration_dest_check_service_same_host(self): + """Confirms exceptioin raises in case dest and src is same host.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) s_ref = self._create_compute_service(host=i_ref['host']) @@ -861,11 +840,8 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_dest_check_service_lack_memory(self): - """ - A testcase of driver._live_migration_dst_check. - destination host doesnt have enough memory. - """ + def test_live_migration_dest_check_service_lack_memory(self): + """Confirms exception raises when dest doesn't have enough memory.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) s_ref = self._create_compute_service(host='somewhere', @@ -882,11 +858,8 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_dest_check_service_works_correctly(self): - """ - A testcase of driver._live_migration_dst_check. - The testcase make sure everything finished with no error. - """ + def test_live_migration_dest_check_service_works_correctly(self): + """Confirms method finishes with no error.""" instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) s_ref = self._create_compute_service(host='somewhere', @@ -899,13 +872,11 @@ class SimpleDriverTestCase(test.TestCase): db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_common_check_service_orig_not_exists(self): - """ - A testcase of driver._live_migration_common_check. - Destination host does not exist. - """ + def test_live_migration_common_check_service_orig_not_exists(self): + """Destination host does not exist.""" + dest = 'dummydest' - # mocks for live_migraiton_common_check() + # mocks for live_migration_common_check() instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) t1 = datetime.datetime.utcnow() - datetime.timedelta(10) @@ -929,18 +900,15 @@ class SimpleDriverTestCase(test.TestCase): i_ref, dest) except exception.Invalid, e: - c = (e.message.find('does not exists') >= 0) + c = (e.message.find('does not exist') >= 0) self.assertTrue(c) self.mox.UnsetStubs() db.instance_destroy(self.context, instance_id) db.service_destroy(self.context, s_ref['id']) - def test_live_migraiton_common_check_service_different_hypervisor(self): - """ - A testcase of driver._live_migration_common_check. - Original host and dest host has different hypervisor type. - """ + def test_live_migration_common_check_service_different_hypervisor(self): + """Original host and dest host has different hypervisor type.""" dest = 'dummydest' instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) @@ -969,11 +937,8 @@ class SimpleDriverTestCase(test.TestCase): db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref2['id']) - def test_live_migraiton_common_check_service_different_version(self): - """ - A testcase of driver._live_migration_common_check. - Original host and dest host has different hypervisor version. 
- """ + def test_live_migration_common_check_service_different_version(self): + """Original host and dest host has different hypervisor version.""" dest = 'dummydest' instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) @@ -1003,11 +968,9 @@ class SimpleDriverTestCase(test.TestCase): db.service_destroy(self.context, s_ref['id']) db.service_destroy(self.context, s_ref2['id']) - def test_live_migraiton_common_check_service_checking_cpuinfo_fail(self): - """ - A testcase of driver._live_migration_common_check. - Original host and dest host has different hypervisor version. - """ + def test_live_migration_common_check_checking_cpuinfo_fail(self): + """Raise excetion when original host doen't have compatible cpu.""" + dest = 'dummydest' instance_id = self._create_instance() i_ref = db.instance_get(self.context, instance_id) @@ -1025,7 +988,7 @@ class SimpleDriverTestCase(test.TestCase): rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), {"method": 'compare_cpu', "args": {'cpu_info': s_ref2['compute_service'][0]['cpu_info']}}).\ - AndRaise(rpc.RemoteError('doesnt have compatibility to', '', '')) + AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) self.mox.ReplayAll() try: @@ -1033,7 +996,7 @@ class SimpleDriverTestCase(test.TestCase): i_ref, dest) except rpc.RemoteError, e: - c = (e.message.find(_('doesnt have compatibility to')) >= 0) + c = (e.message.find(_("doesn't have compatibility to")) >= 0) self.assertTrue(c) self.mox.UnsetStubs() diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index cb65584cfb21..bbd5c6d92b7a 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -279,11 +279,7 @@ class ServiceTestCase(test.TestCase): self.assert_(not serv.model_disconnected) def test_compute_can_update_available_resource(self): - """ - Test nova-compute successfully updated Service table on DB. - Doing so, self.manager.update_service must be called - if 'self.binary == nova-compute', and this testcase checks on it. - """ + """Confirm compute updates their record of compute-service table.""" host = 'foo' binary = 'nova-compute' topic = 'compute1' diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 8ed726c211e6..91bdfcc5ade8 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -23,8 +23,8 @@ from nova import context from nova import db from nova import exception from nova import flags -from nova import test from nova import logging +from nova import test from nova import utils from nova.api.ec2 import cloud from nova.auth import manager @@ -76,12 +76,12 @@ class LibvirtConnTestCase(test.TestCase): 'bridge': 'br101', 'instance_type': 'm1.small'} - def _driver_dependent_test_setup(self): - """ - Setup method. - Call this method at the top of each testcase method, - if the testcase is necessary libvirt and cheetah. - """ + def _driver_dependant_test_setup(self): + """Call this method at the top of each testcase method. + + Checks if libvirt and cheetah, etc is installed. 
+ Otherwise, skip testing.""" + try: global libvirt global libxml2 @@ -92,10 +92,9 @@ class LibvirtConnTestCase(test.TestCase): except ImportError, e: logging.warn("""This test has not been done since """ """using driver-dependent library Cheetah/libvirt/libxml2.""") - raise e + raise # inebitable mocks for calling - #nova.virt.libvirt_conn.LibvirtConnection.__init__ obj = utils.import_object(FLAGS.firewall_driver) fwmock = self.mox.CreateMock(obj) self.mox.StubOutWithMock(libvirt_conn, 'utils', @@ -258,51 +257,31 @@ class LibvirtConnTestCase(test.TestCase): self.assertEquals(uri, testuri) def test_get_vcpu_total(self): - """ - Check if get_vcpu_total returns appropriate cpu value - Connection/OS/driver differenct does not matter for this method, - everyone can execute for checking. - """ + """Check if get_vcpu_total returns appropriate cpu value.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(0 < conn.get_vcpu_total()) - self.mox.UnsetStubs() def test_get_memory_mb_total(self): - """Check if get_memory_mb returns appropriate memory value""" + """Check if get_memory_mb returns appropriate memory value.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(0 < conn.get_memory_mb_total()) - self.mox.UnsetStubs() - - def test_get_local_gb_total(self): - """Check if get_local_gb_total returns appropriate disk value""" - # Note(masumotok): leave this b/c FLAGS.instances_path is inevitable.. - #try: - # self._driver_dependent_test_setup() - #except: - # return - # - #self.mox.ReplayAll() - #conn = libvirt_conn.LibvirtConnection(False) - #self.assertTrue(0 < conn.get_local_gb_total()) - #self.mox.UnsetStubs() - pass def test_get_vcpu_used(self): - """Check if get_local_gb_total returns appropriate disk value""" + """Check if get_local_gb_total returns appropriate disk value.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -321,52 +300,45 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(conn.get_vcpu_used() == 4) - self.mox.UnsetStubs() def test_get_memory_mb_used(self): - """Check if get_memory_mb returns appropriate memory value""" + """Check if get_memory_mb returns appropriate memory value.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(0 < conn.get_memory_mb_used()) - self.mox.UnsetStubs() - - def test_get_local_gb_used(self): - """Check if get_local_gb_total returns appropriate disk value""" - # Note(masumotok): leave this b/c FLAGS.instances_path is inevitable - #try: - # self._driver_dependent_test_setup() - #except: - # return - - #self.mox.ReplayAll() - #conn = libvirt_conn.LibvirtConnection(False) - #self.assertTrue(0 < conn.get_local_gb_used()) - #self.mox.UnsetStubs() - pass def test_get_cpu_info_works_correctly(self): - """ - Check if get_cpu_info works correctly. 
- (in case libvirt.getCapabilities() works correctly) - """ - xml = ("""x86_64Nehalem""" - """Intel""" - """""" - """""" - """""" - """""" - """""" - """""" - """""") + """Check if get_cpu_info works correctly as expected.""" + xml = """ + x86_64 + Nehalem + Intel + + + + + + + + + + + + + + + + + + """ try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, @@ -376,27 +348,34 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(0 < len(conn.get_cpu_info())) - self.mox.UnsetStubs() def test_get_cpu_info_inappropreate_xml(self): - """ - Check if get_cpu_info raises exception - in case libvirt.getCapabilities() returns wrong xml - (in case of xml doesnt have tag) - """ - xml = ("""x86_64Nehalem""" - """Intel""" - """""" - """""" - """""" - """""" - """""" - """""" - """""") + """Raise exception if given xml is inappropriate.""" + xml = """ + x86_64 + Nehalem + Intel + + + + + + + + + + + + + + + + + + """ try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, @@ -409,29 +388,34 @@ class LibvirtConnTestCase(test.TestCase): conn.get_cpu_info() except exception.Invalid, e: c1 = (0 <= e.message.find('Invalid xml')) - self.assertTrue(c1) - self.mox.UnsetStubs() + self.assertTrue(c1) def test_get_cpu_info_inappropreate_xml2(self): - """ - Check if get_cpu_info raises exception - in case libvirt.getCapabilities() returns wrong xml - (in case of xml doesnt have inproper tag - meaning missing "socket" attribute) - """ - xml = ("""x86_64Nehalem""" - """Intel""" - """""" - """""" - """""" - """""" - """""" - """""" - """""") + """Raise exception if given xml is inappropriate(topology tag).""" + xml = """ + x86_64 + Nehalem + Intel + + + + + + + + + + + + + + + + + """ try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, @@ -444,29 +428,12 @@ class LibvirtConnTestCase(test.TestCase): conn.get_cpu_info() except exception.Invalid, e: c1 = (0 <= e.message.find('Invalid xml: topology')) - self.assertTrue(c1) - self.mox.UnsetStubs() + self.assertTrue(c1) def test_update_available_resource_works_correctly(self): - """ - In this method, vcpus/memory_mb/local_gb/vcpu_used/ - memory_mb_used/local_gb_used/hypervisor_type/ - hypervisor_version/cpu_info should be changed. - Based on this specification, this testcase confirms - if this method finishes successfully, - meaning self.db.service_update must be called with dictinary - - {'vcpu':aaa, 'memory_mb':bbb, 'local_gb':ccc, - 'vcpu_used':aaa, 'memory_mb_used':bbb, 'local_gb_sed':ccc, - 'hypervisor_type':ddd, 'hypervisor_version':eee, - 'cpu_info':fff} - - Since each value of above dict can be obtained through - driver(different depends on environment), - only dictionary keys are checked. 
- """ + """Confirm compute_service table is updated successfully.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -478,7 +445,9 @@ class LibvirtConnTestCase(test.TestCase): host = 'foo' binary = 'nova-compute' - service_ref = {'id': 1, 'host': host, 'binary': binary, + service_ref = {'id': 1, + 'host': host, + 'binary': binary, 'topic': 'compute'} self.mox.StubOutWithMock(db, 'service_get_all_by_topic') @@ -491,15 +460,11 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) conn.update_available_resource(host) - self.mox.UnsetStubs() def test_update_resource_info_raise_exception(self): - """ - This testcase confirms if no record found on Service - table, exception can be raised. - """ + """Raise exception if no recorde found on services table.""" try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -518,18 +483,19 @@ class LibvirtConnTestCase(test.TestCase): msg = 'Cannot insert compute manager specific info' c1 = (0 <= e.message.find(msg)) self.assertTrue(c1) - self.mox.ResetAll() def test_compare_cpu_works_correctly(self): - """Calling libvirt.compute_cpu() and works correctly """ - - t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ - """"topology":{"cores":"%s", "threads":"%s", """ - """"sockets":"%s"}, "features":[%s]}""") - cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') + """Calling libvirt.compute_cpu() and works correctly.""" + t = {} + t['arch'] = 'x86' + t['model'] = 'model' + t['vendor'] = 'Intel' + t['topology'] = {'cores': "2", "threads": "1", "sockets": "4"} + t['features'] = ["tm"] + cpu_info = utils.dumps(t) try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -542,20 +508,19 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertTrue(None == conn.compare_cpu(cpu_info)) - self.mox.UnsetStubs() def test_compare_cpu_raises_exception(self): - """ - Libvirt-related exception occurs when calling - libvirt.compare_cpu(). 
- """ - t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ - """"topology":{"cores":"%s", "threads":"%s", """ - """"sockets":"%s"}, "features":[%s]}""") - cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') + """Libvirt-related exception occurs when calling compare_cpu().""" + t = {} + t['arch'] = 'x86' + t['model'] = 'model' + t['vendor'] = 'Intel' + t['topology'] = {'cores': "2", "threads": "1", "sockets": "4"} + t['features'] = ["tm"] + cpu_info = utils.dumps(t) try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -567,18 +532,19 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertRaises(libvirt.libvirtError, conn.compare_cpu, cpu_info) - self.mox.UnsetStubs() def test_compare_cpu_no_compatibility(self): - """libvirt.compare_cpu() return less than 0.(no compatibility)""" - - t = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """ - """"topology":{"cores":"%s", "threads":"%s", """ - """"sockets":"%s"}, "features":[%s]}""") - cpu_info = t % ('x86', 'model', 'vendor', '2', '1', '4', '"tm"') + """Libvirt.compare_cpu() return less than 0.(no compatibility).""" + t = {} + t['arch'] = 'x86' + t['model'] = 'model' + t['vendor'] = 'Intel' + t['topology'] = {'cores': "2", "threads": "1", "sockets": "4"} + t['features'] = ["tm"] + cpu_info = utils.dumps(t) try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -590,16 +556,14 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) self.assertRaises(exception.Invalid, conn.compare_cpu, cpu_info) - self.mox.UnsetStubs() def test_ensure_filtering_rules_for_instance_works_correctly(self): - """ensure_filtering_rules_for_instance works as expected correctly""" - + """ensure_filtering_rules_for_instance() works successfully.""" instance_ref = models.Instance() instance_ref.__setitem__('id', 1) try: - nwmock, fwmock = self._driver_dependent_test_setup() + nwmock, fwmock = self._driver_dependant_test_setup() except: return @@ -613,16 +577,14 @@ class LibvirtConnTestCase(test.TestCase): self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) conn.ensure_filtering_rules_for_instance(instance_ref) - self.mox.UnsetStubs() def test_ensure_filtering_rules_for_instance_timeout(self): - """ensure_filtering_fules_for_instance finishes with timeout""" - + """ensure_filtering_fules_for_instance() finishes with timeout.""" instance_ref = models.Instance() instance_ref.__setitem__('id', 1) try: - nwmock, fwmock = self._driver_dependent_test_setup() + nwmock, fwmock = self._driver_dependant_test_setup() except: return @@ -642,11 +604,9 @@ class LibvirtConnTestCase(test.TestCase): except exception.Error, e: c1 = (0 <= e.message.find('Timeout migrating for')) self.assertTrue(c1) - self.mox.UnsetStubs() def test_live_migration_works_correctly(self): - """_live_migration works as expected correctly """ - + """_live_migration() works as expected correctly.""" class dummyCall(object): f = None @@ -659,7 +619,7 @@ class LibvirtConnTestCase(test.TestCase): ctxt = context.get_admin_context() try: - self._driver_dependent_test_setup() + self._driver_dependant_test_setup() except: return @@ -681,13 +641,9 @@ class LibvirtConnTestCase(test.TestCase): # Not setting post_method/recover_method in this testcase. 
ret = conn._live_migration(ctxt, i_ref, i_ref['host'], '', '') self.assertTrue(ret == None) - self.mox.UnsetStubs() def test_live_migration_raises_exception(self): - """ - _live_migration raises exception, then this testcase confirms - recovered method is called. - """ + """Confirms recover method is called when exceptions are raised.""" i_ref = models.Instance() i_ref.__setitem__('id', 1) i_ref.__setitem__('host', 'dummy') @@ -697,7 +653,7 @@ class LibvirtConnTestCase(test.TestCase): pass try: - nwmock, fwmock = self._driver_dependent_test_setup() + nwmock, fwmock = self._driver_dependant_test_setup() except: return @@ -724,7 +680,6 @@ class LibvirtConnTestCase(test.TestCase): conn._mlive_migration, ctxt, instance_ref, dest, '', dummy_recover_method) - self.mox.UnsetStubs() def tearDown(self): super(LibvirtConnTestCase, self).tearDown() diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 70ddd3aafa43..069a424d152d 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -326,59 +326,6 @@ class FakeConnection(object): 'username': 'fakeuser', 'password': 'fakepassword'} - def get_cpu_info(self): - """This method is supported only libvirt. """ - return - - def get_vcpu_total(self): - """This method is supported only libvirt. """ - return - - def get_memory_mb_total(self): - """This method is supported only libvirt. """ - return - - def get_local_gb_total(self): - """This method is supported only libvirt. """ - return - - def get_vcpu_used(self): - """This method is supported only libvirt. """ - return - - def get_memory_mb_used(self): - """This method is supported only libvirt. """ - return - - def get_local_gb_used(self): - """This method is supported only libvirt. """ - return - - def get_hypervisor_type(self): - """This method is supported only libvirt..""" - return - - def get_hypervisor_version(self): - """This method is supported only libvirt..""" - return - - def compare_cpu(self, xml): - """This method is supported only libvirt..""" - raise NotImplementedError('This method is supported only libvirt.') - - def ensure_filtering_rules_for_instance(self, instance_ref): - """This method is supported only libvirt..""" - return - - def live_migration(self, context, instance_ref, dest, - post_method, recover_method): - """This method is supported only libvirt..""" - return - - def unfilter_instance(self, instance_ref): - """This method is supported only libvirt..""" - raise NotImplementedError('This method is supported only libvirt.') - def refresh_security_group_rules(self, security_group_id): """This method is called after a change to security groups. @@ -428,20 +375,25 @@ class FakeConnection(object): return True def update_available_resource(self, ctxt, host): - """This method is supported only libvirt. 
""" + """This method is supported only by libvirt.""" return def compare_cpu(self, xml): - """This method is supported only libvirt..""" - raise NotImplementedError('This method is supported only libvirt.') + """This method is supported only by libvirt.""" + raise NotImplementedError('This method is supported only by libvirt.') def ensure_filtering_rules_for_instance(self, instance_ref): - """This method is supported only libvirt..""" - raise NotImplementedError('This method is supported only libvirt.') + """This method is supported only by libvirt.""" + raise NotImplementedError('This method is supported only by libvirt.') - def live_migration(self, context, instance_ref, dest): - """This method is supported only libvirt..""" - raise NotImplementedError('This method is supported only libvirt.') + def live_migration(self, context, instance_ref, dest, + post_method, recover_method): + """This method is supported only by libvirt.""" + return + + def unfilter_instance(self, instance_ref): + """This method is supported only by libvirt.""" + raise NotImplementedError('This method is supported only by libvirt.') class FakeInstance(object): diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index d39836e72a4f..934aed960b41 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -36,7 +36,6 @@ Supports KVM, QEMU, UML, and XEN. """ -import json import os import shutil import random @@ -105,7 +104,7 @@ flags.DEFINE_string('firewall_driver', 'Firewall driver (defaults to iptables)') flags.DEFINE_string('cpuinfo_xml_template', utils.abspath('virt/cpuinfo.xml.template'), - 'CpuInfo XML Template (used only live migration now)') + 'CpuInfo XML Template (Used only live migration now)') flags.DEFINE_string('live_migration_uri', "qemu+tcp://%s/system", 'Define protocol used by live_migration feature') @@ -851,23 +850,46 @@ class LibvirtConnection(object): return interfaces def get_vcpu_total(self): - """ Get vcpu number of physical computer. """ + """Get vcpu number of physical computer. + + :returns: the number of cpu core. + + """ + return open('/proc/cpuinfo').read().count('processor') def get_memory_mb_total(self): - """Get the total memory size(MB) of physical computer .""" + """Get the total memory size(MB) of physical computer. + + :returns: the total amount of memory(MB). + + """ + meminfo = open('/proc/meminfo').read().split() idx = meminfo.index('MemTotal:') # transforming kb to mb. return int(meminfo[idx + 1]) / 1024 def get_local_gb_total(self): - """Get the total hdd size(GB) of physical computer .""" + """Get the total hdd size(GB) of physical computer. + + :returns: + The total amount of HDD(GB). + Note that this value shows a partition where + NOVA-INST-DIR/instances mounts. + + """ + hddinfo = os.statvfs(FLAGS.instances_path) return hddinfo.f_frsize * hddinfo.f_blocks / 1024 / 1024 / 1024 def get_vcpu_used(self): - """ Get vcpu available number of physical computer. """ + """ Get vcpu usage number of physical computer. + + :returns: The total number of vcpu that currently used. + + """ + total = 0 for i in self._conn.listDomainsID(): dom = self._conn.lookupByID(i) @@ -875,7 +897,12 @@ class LibvirtConnection(object): return total def get_memory_mb_used(self): - """Get the free memory size(MB) of physical computer.""" + """Get the free memory size(MB) of physical computer. + + :returns: the total usage of memory(MB). 
+
+        """
+
         m = open('/proc/meminfo').read().split()
         idx1 = m.index('MemFree:')
         idx2 = m.index('Buffers:')
@@ -884,21 +911,47 @@ class LibvirtConnection(object):
         return self.get_memory_mb_total() - avail

     def get_local_gb_used(self):
-        """Get the free hdd size(GB) of physical computer ."""
+        """Get the free hdd size(GB) of physical computer.
+
+        :returns:
+            The total usage of HDD(GB).
+            Note that this value shows a partition where
+            NOVA-INST-DIR/instances mounts.
+
+        """
+
         hddinfo = os.statvfs(FLAGS.instances_path)
         avail = hddinfo.f_frsize * hddinfo.f_bavail / 1024 / 1024 / 1024
         return self.get_local_gb_total() - avail

     def get_hypervisor_type(self):
-        """ Get hypervisor type """
+        """Get hypervisor type.
+
+        :returns: hypervisor type (ex. qemu)
+
+        """
+
         return self._conn.getType()

     def get_hypervisor_version(self):
-        """ Get hypervisor version """
+        """Get hypervisor version.
+
+        :returns: hypervisor version (ex. 12003)
+
+        """
+
         return self._conn.getVersion()

     def get_cpu_info(self):
-        """ Get cpuinfo information """
+        """Get cpuinfo information.
+
+        Obtains cpu feature from virConnect.getCapabilities,
+        and returns as a json string.
+
+        :returns: see above description
+
+        """
+
         xml = self._conn.getCapabilities()
         xml = libxml2.parseDoc(xml)
         nodes = xml.xpathEval('//cpu')
@@ -931,17 +984,9 @@ class LibvirtConnection(object):
         for nodes in feature_nodes:
             features.append(nodes.get_properties().getContent())

-        template = ("""{"arch":"%s", "model":"%s", "vendor":"%s", """
-                    """"topology":{"cores":"%s", "threads":"%s", """
-                    """"sockets":"%s"}, "features":[%s]}""")
-        f = ['"%s"' % x for x in features]
-        return template % (cpu_info['arch'],
-                           cpu_info['model'],
-                           cpu_info['vendor'],
-                           topology['cores'],
-                           topology['sockets'],
-                           topology['threads'],
-                           ', '.join(f))
+        cpu_info['topology'] = topology
+        cpu_info['features'] = features
+        return utils.dumps(cpu_info)

     def block_stats(self, instance_name, disk):
         """
@@ -974,12 +1019,16 @@ class LibvirtConnection(object):
         self.firewall_driver.refresh_security_group_members(security_group_id)

     def update_available_resource(self, ctxt, host):
-        """
-        Update compute manager resource info on Service table.
+        """Updates compute manager resource info on ComputeService table.
+
         This method is called when nova-compute launches, and
         whenever admin executes "nova-manage service update_resource".
+
+        :param ctxt: security context
+        :param host: hostname that compute manager is currently running on
+
         """
+
         try:
             service_ref = db.service_get_all_compute_by_host(ctxt, host)[0]
         except exception.NotFound:
@@ -1008,44 +1057,44 @@ class LibvirtConnection(object):
         db.compute_service_update(ctxt, compute_service_ref[0]['id'], dic)

     def compare_cpu(self, cpu_info):
-        """
-        Check the host cpu is compatible to a cpu given by xml.
+        """Checks if the host cpu is compatible with a cpu given by xml.
+
         "xml" must be a part of libvirt.openReadonly().getCapabilities().
         return values follows by virCPUCompareResult.
         if 0 > return value, do live migration.
-        'http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult'
+
+        :param cpu_info: json string that shows cpu feature (see get_cpu_info())
+        :returns:
+            None. If the given cpu info is not compatible with this server,
+            raise exception.
+
+        """
-        msg = _('Checking cpu_info: instance was launched this cpu.\n: %s ')
-        LOG.info(msg % cpu_info)
-        dic = json.loads(cpu_info)
+
+        LOG.info(_('Checking cpu_info: instance was launched with this cpu.\n%s')
+                 % cpu_info)
+        dic = utils.loads(cpu_info)
         xml = str(Template(self.cpuinfo_xml, searchList=dic))
-        msg = _('to xml...\n: %s ')
-        LOG.info(msg % xml)
-
-        url = 'http://libvirt.org/html/libvirt-libvirt.html'
-        url += '#virCPUCompareResult\n'
-        msg = 'CPU does not have compativility.\n'
-        msg += 'result:%s \n'
-        msg += 'Refer to %s'
-        msg = _(msg)
+        LOG.info(_('to xml...\n:%s ') % xml)
+        u = "http://libvirt.org/html/libvirt-libvirt.html#virCPUCompareResult"
+        m = _("CPU doesn't have compatibility.\n\n%(ret)s\n\nRefer to %(u)s")

         # unknown character exists in xml, then libvirt complains
         try:
             ret = self._conn.compareCPU(xml, 0)
         except libvirt.libvirtError, e:
-            LOG.error(msg % (e.message, url))
-            raise e
+            ret = e.message
+            LOG.error(m % locals())
+            raise

         if ret <= 0:
-            raise exception.Invalid(msg % (ret, url))
+            raise exception.Invalid(m % locals())

         return

     def ensure_filtering_rules_for_instance(self, instance_ref):
-        """
-        Setting up inevitable filtering rules on compute node,
-        and waiting for its completion.
+        """Setting up filtering rules and waiting for their completion.
+
         To migrate an instance, filtering rules to hypervisors
         and firewalls are inevitable on destination host.
         ( Waiting only for filtering rules to hypervisor,
         since filtering rules to firewall rules can be set faster).

         Don't use thread for this method since migration should
         not be started when setting-up filtering rules operations
         are not completed.
+
+        :param instance_ref: nova.db.sqlalchemy.models.Instance object
+
         """

-        # Tf any instances never launch at destination host,
+        # If any instances never launch at destination host,
         #   basic-filtering must be set here.
         self.firewall_driver.setup_basic_filtering(instance_ref)
         # setting up nova-instance-instance-xx mainly.
         self.firewall_driver.prepare_instance_filter(instance_ref)
@@ -1088,16 +1140,42 @@ class LibvirtConnection(object):

     def live_migration(self, ctxt, instance_ref, dest,
                        post_method, recover_method):
+        """Spawning live_migration operation for distributing high-load.
+
+        :param ctxt: security context
+        :param instance_ref:
+            nova.db.sqlalchemy.models.Instance object
+            instance object that is migrated.
+        :param dest: destination host
+        :param post_method:
+            post operation method.
+            expected nova.compute.manager.post_live_migration.
+        :param recover_method:
+            recovery method when any exception occurs.
+            expected nova.compute.manager.recover_live_migration.
+
         """
-        Just spawning live_migration operation for
-        distributing high-load.
-        """
+
         greenthread.spawn(self._live_migration, ctxt, instance_ref, dest,
                           post_method, recover_method)

     def _live_migration(self, ctxt, instance_ref, dest,
                         post_method, recover_method):
-        """ Do live migration."""
+        """Do live migration.
+
+        :param ctxt: security context
+        :param instance_ref:
+            nova.db.sqlalchemy.models.Instance object
+            instance object that is migrated.
+        :param dest: destination host
+        :param post_method:
+            post operation method.
+            expected nova.compute.manager.post_live_migration.
+        :param recover_method:
+            recovery method when any exception occurs.
+            expected nova.compute.manager.recover_live_migration.
+
+        """

         # Do live migration.
         try:
@@ -1122,7 +1200,7 @@ class LibvirtConnection(object):

         except Exception, e:
             recover_method(ctxt, instance_ref)
-            raise e
+            raise

         # Waiting for completion of live_migration.
         timer = utils.LoopingCall(f=None)
@@ -1139,7 +1217,7 @@ class LibvirtConnection(object):
         timer.start(interval=0.5, now=True)

     def unfilter_instance(self, instance_ref):
-        """See comments of same method in firewall_driver"""
+        """See comments of same method in firewall_driver."""
         self.firewall_driver.unfilter_instance(instance_ref)


diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py
index 3dd7d6e94a52..0e12a4587e51 100644
--- a/nova/virt/xenapi_conn.py
+++ b/nova/virt/xenapi_conn.py
@@ -231,25 +231,25 @@ class XenAPIConnection(object):
                 'password': FLAGS.xenapi_connection_password}

     def update_available_resource(self, ctxt, host):
-        """This method is supported only libvirt. """
+        """This method is supported only by libvirt."""
         return

     def compare_cpu(self, xml):
-        """This method is supported only libvirt.."""
-        raise NotImplementedError('This method is supported only libvirt.')
+        """This method is supported only by libvirt."""
+        raise NotImplementedError('This method is supported only by libvirt.')

     def ensure_filtering_rules_for_instance(self, instance_ref):
-        """This method is supported only libvirt.."""
+        """This method is supported only by libvirt."""
         return

     def live_migration(self, context, instance_ref, dest,
                        post_method, recover_method):
-        """This method is supported only libvirt.."""
+        """This method is supported only by libvirt."""
         return

     def unfilter_instance(self, instance_ref):
-        """This method is supported only libvirt.."""
-        raise NotImplementedError('This method is supported only libvirt.')
+        """This method is supported only by libvirt."""
+        raise NotImplementedError('This method is supported only by libvirt.')


diff --git a/nova/volume/driver.py b/nova/volume/driver.py
index b74c74197ea4..fbc52a598322 100644
--- a/nova/volume/driver.py
+++ b/nova/volume/driver.py
@@ -140,7 +140,7 @@ class VolumeDriver(object):

     def check_for_export(self, context, volume_id):
         """Make sure volume is exported."""
-        return True
+        raise NotImplementedError()


 class AOEDriver(VolumeDriver):
@@ -229,9 +229,9 @@ class AOEDriver(VolumeDriver):
                 break
         if not exported:
             # Instance will be terminated in this case.
-            desc = _("""Cannot confirm exported volume id:%(volume_id)s."""
-                     """vblade process for e%(shelf_id)s.%(blade_id)s """
-                     """isn't running.""") % locals()
+            desc = _("Cannot confirm exported volume id:%(volume_id)s. "
+                     "vblade process for e%(shelf_id)s.%(blade_id)s "
+                     "isn't running.") % locals()
             raise exception.ProcessExecutionError(out, _err, cmd=cmd,
                                                   description=desc)
@@ -373,9 +373,9 @@ class ISCSIDriver(VolumeDriver):
             # Instances remount read-only in this case.
             # /etc/init.d/iscsitarget restart and rebooting nova-volume
             # is better since ensure_export() works at boot time.
-            logging.error(_("""Cannot confirm exported volume """
-                            """id:%(volume_id)s.""") % locals())
-            raise e
+            logging.error(_("Cannot confirm exported volume "
+                            "id:%(volume_id)s.") % locals())
+            raise


 class FakeISCSIDriver(ISCSIDriver):

From c6b2d07f47004576fa386a6d270203b1d7937664 Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Wed, 23 Feb 2011 00:15:39 +0900
Subject: [PATCH 06/76] Fix tiny mistakes!
(remove unnecessary comment, etc) --- nova/tests/test_scheduler.py | 2 +- nova/tests/test_volume.py | 3 --- nova/virt/libvirt_conn.py | 2 +- 3 files changed, 2 insertions(+), 5 deletions(-) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 301106848c7e..47a6d0e8289d 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -258,10 +258,10 @@ class SimpleDriverTestCase(test.TestCase): inst['project_id'] = self.project.id inst['instance_type'] = 'm1.tiny' inst['mac_address'] = utils.generate_mac() + inst['vcpus'] = kwargs.get('vcpus', 1) inst['ami_launch_index'] = 0 inst['availability_zone'] = kwargs.get('availability_zone', None) inst['host'] = kwargs.get('host', 'dummy') - inst['vcpus'] = kwargs.get('vcpus', 4) inst['memory_mb'] = kwargs.get('memory_mb', 20) inst['local_gb'] = kwargs.get('local_gb', 30) inst['launched_on'] = kwargs.get('launghed_on', 'dummy') diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index 6ae075caa236..e8b4ceee8f48 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -318,9 +318,6 @@ class ISCSITestCase(DriverTestCase): mountpoint = "/dev/sd" + chr((ord('b') + index)) db.volume_attached(self.context, vol_ref['id'], self.instance_id, mountpoint) - #iscsi_target = db.volume_allocate_iscsi_target(self.context, - # vol_ref['id'], - # vol_ref['host']) volume_id_list.append(vol_ref['id']) return volume_id_list diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 934aed960b41..118ea13e548f 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -888,7 +888,7 @@ class LibvirtConnection(object): :returns: The total number of vcpu that currently used. - """ + """ total = 0 for i in self._conn.listDomainsID(): From 485a6c5a9502679bc5ecf02f8e758170ac0335dc Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Wed, 23 Feb 2011 01:20:39 +0900 Subject: [PATCH 07/76] Fixed some docstring --- nova/scheduler/manager.py | 2 +- nova/tests/test_scheduler.py | 3 ++- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 783594c6f19d..cd5012fd5c11 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -59,7 +59,7 @@ class SchedulerManager(manager.Manager): try: host = getattr(self.driver, driver_method)(elevated, *args, **kwargs) - except AttributeError, e: + except AttributeError: host = self.driver.schedule(elevated, topic, *args, **kwargs) rpc.cast(context, diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 47a6d0e8289d..71e524bca968 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -60,6 +60,7 @@ class SchedulerTestCase(test.TestCase): self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') def _create_compute_service(self): + """Create compute-manager(ComputeService and Service record).""" ctxt = context.get_admin_context() dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0, 'availability_zone': 'dummyzone'} @@ -150,7 +151,7 @@ class SchedulerTestCase(test.TestCase): db.service_destroy(ctxt, s_ref['id']) def test_show_host_resources_works_correctly(self): - """show_host_resources() works correctly as expected.""" + """Show_host_resources() works correctly as expected.""" scheduler = manager.SchedulerManager() ctxt = context.get_admin_context() From 1caa7f189827b4721c2e9d3ddf753acd749d7916 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Tue, 1 Mar 2011 17:52:46 +0900 
Subject: [PATCH 08/76] rename db migration script --- nova/compute/manager.py | 2 +- .../versions/007_add_live_migration.py | 83 +++++++++++++++++++ 2 files changed, 84 insertions(+), 1 deletion(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/007_add_live_migration.py diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 8b90ffbcad15..d085a0b6a63a 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -818,7 +818,7 @@ class ComputeManager(manager.Manager): LOG.info(_('Migrating %(i_name)s to %(dest)s finishes successfully.') % locals()) - LOG.info(_("The below error is normally occurs." + LOG.info(_("The below error is normally occurs. " "Just check if instance is successfully migrated.\n" "libvir: QEMU error : Domain not found: no domain " "with matching name..")) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_live_migration.py new file mode 100644 index 000000000000..2689b5b7485d --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_live_migration.py @@ -0,0 +1,83 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from migrate import * +from nova import log as logging +from sqlalchemy import * + + +meta = MetaData() + +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# +# New Tables +# + +compute_services = Table('compute_services', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('service_id', Integer(), nullable=False), + + Column('vcpus', Integer(), nullable=False), + Column('memory_mb', Integer(), nullable=False), + Column('local_gb', Integer(), nullable=False), + Column('vcpus_used', Integer(), nullable=False), + Column('memory_mb_used', Integer(), nullable=False), + Column('local_gb_used', Integer(), nullable=False), + Column('hypervisor_type', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + Column('hypervisor_version', Integer(), nullable=False), + Column('cpu_info', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=False), + ) + + +# +# Tables to alter +# +instances_launched_on = Column( + 'launched_on', + Text(convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + try: + compute_services.create() + except Exception: + logging.info(repr(compute_services)) + logging.exception('Exception while creating table') + meta.drop_all(tables=[compute_services]) + raise + + instances.create_column(instances_launched_on) From d13a623625a56a029f9dd5ccba3e70f492efdb2c Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Tue, 1 Mar 2011 18:32:57 +0900 Subject: [PATCH 09/76] test_compute is changed b/c lack of import instance_types --- nova/tests/test_compute.py | 1 + 1 file changed, 1 insertion(+) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 3c88d186d081..2a18dd47b9ee 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -32,6 +32,7 @@ from nova import rpc from nova import test from nova import utils from nova.auth import manager +from nova.compute import instance_types from nova.compute import manager as compute_manager from nova.compute import power_state from nova.db.sqlalchemy import models From 688acacd85e07fc578c8731df6a4421e64499c8b Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Tue, 1 Mar 2011 18:53:02 +0900 Subject: [PATCH 10/76] At previous commit, I forget to erase conflict - fixed it. --- nova/tests/test_compute.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 75fbc9324d73..3438719f4cd9 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -309,7 +309,13 @@ class ComputeTestCase(test.TestCase): self.compute.terminate_instance(self.context, instance_id) -<<<<<<< TREE + def test_get_by_flavor_id(self): + type = instance_types.get_by_flavor_id(1) + self.assertEqual(type, 'm1.tiny') + + type = instance_types.get_by_flavor_id("1") + self.assertEqual(type, 'm1.tiny') + def _setup_other_managers(self): self.volume_manager = utils.import_object(FLAGS.volume_manager) self.network_manager = utils.import_object(FLAGS.network_manager) @@ -562,11 +568,3 @@ class ComputeTestCase(test.TestCase): db.instance_destroy(c, instance_id) db.volume_destroy(c, v_ref['id']) db.floating_ip_destroy(c, flo_addr) -======= - def test_get_by_flavor_id(self): - type = instance_types.get_by_flavor_id(1) - self.assertEqual(type, 'm1.tiny') - - type = instance_types.get_by_flavor_id("1") - self.assertEqual(type, 'm1.tiny') ->>>>>>> MERGE-SOURCE From cdb1b16a6019fd68a7969666d754c4007607ae53 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Tue, 1 Mar 2011 23:18:37 +0000 Subject: [PATCH 11/76] * Added ability to launch XenServer instances with per-os vm-params. 
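A minimal sketch of the per-os flow this change introduces, using only names
that appear in the diff below (illustration only, not part of the patch):

    # nova/compute/api.py reads the image property, defaulting to linux
    os_type = image['properties'].get('os_type', 'linux')

    # for a Glance VHD image, vm_utils then picks PV vs. HVM from it:
    # windows guests get hardware virtualization, everything else PV
    use_pv_kernel = (os_type != 'windows')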
--- nova/compute/api.py | 5 +- .../versions/007_add_os_type_to_instances.py | 45 +++++ nova/db/sqlalchemy/models.py | 2 + nova/virt/xenapi/vm_utils.py | 156 ++++++++++++------ nova/virt/xenapi/vmops.py | 17 +- 5 files changed, 166 insertions(+), 59 deletions(-) create mode 100644 nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py diff --git a/nova/compute/api.py b/nova/compute/api.py index 625778b66520..8bdf712a0b25 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -125,6 +125,8 @@ class API(base.Base): raise quota.QuotaError(msg, "MetadataLimitExceeded") image = self.image_service.show(context, image_id) + os_type = image['properties'].get('os_type', 'linux') + if kernel_id is None: kernel_id = image.get('kernel_id', None) if ramdisk_id is None: @@ -180,7 +182,8 @@ class API(base.Base): 'key_data': key_data, 'locked': False, 'metadata': metadata, - 'availability_zone': availability_zone} + 'availability_zone': availability_zone, + 'os_type': os_type} elevated = context.elevated() instances = [] LOG.debug(_("Going to run %s instances..."), num_instances) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py new file mode 100644 index 000000000000..d6d964b953a0 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py @@ -0,0 +1,45 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + +instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + +# FIXME(dubs) should this be not null? Maybe create as nullable, then +# populate all existing rows with 'linux', then adding not null constraint. +instances_os_type = Column('os_type', + String(length=255, convert_unicode=False, + assert_unicode=None, unicode_error=None, + _warn_on_bytestring=False), + nullable=True) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + instances.create_column(instances_os_type) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 1882efeba3e4..b78c95e40d2c 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -188,6 +188,8 @@ class Instance(BASE, NovaBase): locked = Column(Boolean) + os_type = Column(String(255)) + # TODO(vish): see Ewan's email about state improvements, probably # should be in a driver base class or some such # vmstate_state = running, halted, suspended, paused diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 6d9aeb0609d0..11f1fabe93be 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -80,62 +80,80 @@ class VMHelper(HelperBase): """ @classmethod - def create_vm(cls, session, instance, kernel, ramdisk, pv_kernel=False): + def create_vm(cls, session, instance, kernel, ramdisk, use_pv_kernel=False): """Create a VM record. Returns a Deferred that gives the new VM reference. - the pv_kernel flag indicates whether the guest is HVM or PV + the use_pv_kernel flag indicates whether the guest is HVM or PV + + There are 3 scenarios: + + 1. Using paravirtualization, kernel passed in + + 2. Using paravirtualization, kernel within the image + + 3. Using hardware virtualization """ instance_type = instance_types.INSTANCE_TYPES[instance.instance_type] mem = str(long(instance_type['memory_mb']) * 1024 * 1024) vcpus = str(instance_type['vcpus']) rec = { - 'name_label': instance.name, - 'name_description': '', + 'actions_after_crash': 'destroy', + 'actions_after_reboot': 'restart', + 'actions_after_shutdown': 'destroy', + 'affinity': '', + 'blocked_operations': {}, + 'ha_always_run': False, + 'ha_restart_priority': '', + 'HVM_boot_params': {}, + 'HVM_boot_policy': '', 'is_a_template': False, - 'memory_static_min': '0', - 'memory_static_max': mem, 'memory_dynamic_min': mem, 'memory_dynamic_max': mem, + 'memory_static_min': '0', + 'memory_static_max': mem, + 'memory_target': mem, + 'name_description': '', + 'name_label': instance.name, +# 'other_config': {'allowvssprovider': False}, + 'other_config': {}, + 'PCI_bus': '', + 'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true', + 'viridian': 'true', 'timeoffset': '0'}, + 'PV_args': '', + 'PV_bootloader': '', + 'PV_bootloader_args': '', + 'PV_kernel': '', + 'PV_legacy_args': '', + 'PV_ramdisk': '', + 'recommendations': '', + 'tags': [], + 'user_version': '0', 'VCPUs_at_startup': vcpus, 'VCPUs_max': vcpus, 'VCPUs_params': {}, - 'actions_after_shutdown': 'destroy', - 'actions_after_reboot': 'restart', - 'actions_after_crash': 'destroy', - 'PV_bootloader': '', - 'PV_kernel': '', - 'PV_ramdisk': '', - 'PV_args': '', - 'PV_bootloader_args': '', - 'PV_legacy_args': '', - 'HVM_boot_policy': '', - 'HVM_boot_params': {}, - 'platform': {}, - 'PCI_bus': '', - 'recommendations': '', - 'affinity': '', - 'user_version': '0', - 'other_config': {}, + 'xenstore_data': {} } - #Complete VM configuration record according to the image type - #non-raw/raw with PV kernel/raw in HVM mode - if instance.kernel_id: - rec['PV_bootloader'] = '' - rec['PV_kernel'] = kernel - rec['PV_ramdisk'] = ramdisk - rec['PV_args'] = 'root=/dev/xvda1' - rec['PV_bootloader_args'] = '' - rec['PV_legacy_args'] = '' - else: - if pv_kernel: - rec['PV_args'] = 'noninteractive' - rec['PV_bootloader'] = 'pygrub' + + # Complete VM configuration record according to the image type + # non-raw/raw with PV 
kernel/raw in HVM mode
+        if use_pv_kernel:
+            rec['platform']['nx'] = 'false'
+            if instance.kernel_id:
+                # 1. Kernel explicitly passed in, use that
+                rec['PV_args'] = 'root=/dev/xvda1'
+                rec['PV_kernel'] = kernel
+                rec['PV_ramdisk'] = ramdisk
             else:
-            if pv_kernel:
-                rec['PV_args'] = 'noninteractive'
-                rec['PV_bootloader'] = 'pygrub'
+                # 2. Use kernel within the image
+                rec['PV_args'] = 'clocksource=jiffies'
+                rec['PV_bootloader'] = 'pygrub'
+        else:
-            rec['HVM_boot_policy'] = 'BIOS order'
-            rec['HVM_boot_params'] = {'order': 'dc'}
-            rec['platform'] = {'acpi': 'true', 'apic': 'true',
-                               'pae': 'true', 'viridian': 'true'}
+            # 3. Using hardware virtualization
+            rec['platform']['nx'] = 'true'
+            rec['HVM_boot_params'] = {'order': 'dc'}
+            rec['HVM_boot_policy'] = 'BIOS order'
+
         LOG.debug(_('Created VM %s...'), instance.name)
         vm_ref = session.call_xenapi('VM.create', rec)
         instance_name = instance.name
@@ -497,17 +515,32 @@ class VMHelper(HelperBase):
         return uuid

     @classmethod
-    def lookup_image(cls, session, instance_id, vdi_ref):
+    def determine_is_pv(cls, session, instance_id, vdi_ref, disk_image_type,
+                        os_type):
         """
-        Determine if VDI is using a PV kernel
+        Determine whether the VM will use a paravirtualized kernel or if it
+        will use hardware virtualization.
+
+        1. Objectstore (any image type):
+           We use the plugin to figure out whether the VDI uses PV
+
+        2. Glance (VHD): then we use `os_type`, raise if not set
+
+        3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+           available
+
+        4. Glance (DISK): pv is assumed
         """
         if FLAGS.xenapi_image_service == 'glance':
-            return cls._lookup_image_glance(session, vdi_ref)
+            # 2, 3, 4: Glance
+            return cls._determine_is_pv_glance(
+                session, vdi_ref, disk_image_type, os_type)
         else:
-            return cls._lookup_image_objectstore(session, instance_id, vdi_ref)
+            # 1. Objectstore
+            return cls._determine_is_pv_objectstore(session, instance_id, vdi_ref)

     @classmethod
-    def _lookup_image_objectstore(cls, session, instance_id, vdi_ref):
+    def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref):
         LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
         fn = "is_vdi_pv"
         args = {}
@@ -523,9 +556,38 @@ class VMHelper(HelperBase):
         return pv

     @classmethod
-    def _lookup_image_glance(cls, session, vdi_ref):
+    def _determine_is_pv_glance(cls, session, vdi_ref, disk_image_type,
+                                os_type):
+        """
+        For a Glance image, determine if we need paravirtualization.
+
+        The relevant scenarios are:
+        2. Glance (VHD): then we use `os_type`, raise if not set
+
+        3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is
+           available
+
+        4. Glance (DISK): pv is assumed
+        """
+
         LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref)
-        return with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
+        if disk_image_type == ImageType.DISK_VHD:
+            # 2. VHD
+            if os_type == 'windows':
+                is_pv = False
+            else:
+                is_pv = True
+        elif disk_image_type == ImageType.DISK_RAW:
+            # 3. RAW
+            is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv)
+        elif disk_image_type == ImageType.DISK:
+            # 4.
Disk + is_pv = True + else: + raise exception.Error(_("Unknown image format %(disk_image_type)s") + % locals()) + + return is_pv @classmethod def lookup(cls, session, i): diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index bc39aa1402c4..abc1fb699261 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -87,15 +87,7 @@ class VMOps(object): vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid) - pv_kernel = False - if disk_image_type == ImageType.DISK_RAW: - #Have a look at the VDI and see if it has a PV kernel - pv_kernel = VMHelper.lookup_image(self._session, instance.id, - vdi_ref) - elif disk_image_type == ImageType.DISK_VHD: - # TODO(sirp): Assuming PV for now; this will need to be - # configurable as Windows will use HVM. - pv_kernel = True + os_type = instance.get('os_type', 'linux') kernel = None if instance.kernel_id: @@ -107,8 +99,11 @@ class VMOps(object): ramdisk = VMHelper.fetch_image(self._session, instance.id, instance.ramdisk_id, user, project, ImageType.KERNEL_RAMDISK) - vm_ref = VMHelper.create_vm(self._session, - instance, kernel, ramdisk, pv_kernel) + use_pv_kernel = VMHelper.determine_is_pv( + self._session, instance.id, vdi_ref, disk_image_type, os_type) + vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk, + use_pv_kernel) + VMHelper.create_vbd(self._session, vm_ref, vdi_ref, 0, True) # inject_network_info and create vifs From 6321c5047c082bba8edf10a660fdb6a56430cc44 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Wed, 2 Mar 2011 00:19:02 +0000 Subject: [PATCH 12/76] * Added first cut of migration for os_type on instances table * Track os_type when taking snapshots --- .../versions/007_add_os_type_to_instances.py | 4 +++- nova/virt/xenapi/vm_utils.py | 9 ++++++--- nova/virt/xenapi/vmops.py | 4 ++-- plugins/xenserver/xenapi/etc/xapi.d/plugins/glance | 11 +++++++---- 4 files changed, 18 insertions(+), 10 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py index d6d964b953a0..21f21b040d90 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py @@ -34,7 +34,7 @@ instances_os_type = Column('os_type', String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False), - nullable=True) + nullable=False) def upgrade(migrate_engine): @@ -43,3 +43,5 @@ def upgrade(migrate_engine): meta.bind = migrate_engine instances.create_column(instances_os_type) + + diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 11f1fabe93be..9c0bb55790cd 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -303,7 +303,7 @@ class VMHelper(HelperBase): return template_vm_ref, template_vdi_uuids @classmethod - def upload_image(cls, session, instance_id, vdi_uuids, image_id): + def upload_image(cls, session, instance, vdi_uuids, image_id): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. """ @@ -312,15 +312,18 @@ class VMHelper(HelperBase): logging.debug(_("Asking xapi to upload %(vdi_uuids)s as" " ID %(image_id)s") % locals()) + # TODO(dubs): os_type is currently defaulting to linux, we actually + # want to make this a NOT NULL column and require it to be specified. 
        params = {'vdi_uuids': vdi_uuids,
                  'image_id': image_id,
                  'glance_host': FLAGS.glance_host,
                  'glance_port': FLAGS.glance_port,
-                 'sr_path': get_sr_path(session)}
+                 'sr_path': get_sr_path(session),
+                 'os_type': instance.get('os_type', 'linux')}

         kwargs = {'params': pickle.dumps(params)}
         task = session.async_call_plugin('glance', 'upload_vhd', kwargs)
-        session.wait_for_task(instance_id, task)
+        session.wait_for_task(instance.id, task)

     @classmethod
     def fetch_image(cls, session, instance_id, image, user, project,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index abc1fb699261..1edf39c5b7b7 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -238,11 +238,11 @@ class VMOps(object):
         try:
             # call plugin to ship snapshot off to glance
             VMHelper.upload_image(
-                self._session, instance.id, template_vdi_uuids, image_id)
+                self._session, instance, template_vdi_uuids, image_id)
         finally:
             self._destroy(instance, template_vm_ref, shutdown=False,
                           destroy_kernel_ramdisk=False)
-
+
         logging.debug(_("Finished snapshot and upload for VM %s"), instance)

     def reboot(self, instance):
diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
index 7531af4ec72f..160bf482f1d3 100644
--- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
+++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance
@@ -191,7 +191,7 @@ def _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids):
         os.link(source, link_name)


-def _upload_tarball(staging_path, image_id, glance_host, glance_port):
+def _upload_tarball(staging_path, image_id, glance_host, glance_port, os_type):
     """
     Create a tarball of the image and then stream that into Glance
     using chunked-transfer-encoded HTTP.
@@ -205,9 +205,10 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port):
     headers = {
         'content-type': 'application/octet-stream',
         'transfer-encoding': 'chunked',
-        'x-image-meta-is_public': 'True',
+        'x-image-meta-is-public': 'True',
         'x-image-meta-status': 'queued',
-        'x-image-meta-type': 'vhd'
+        'x-image-meta-type': 'vhd',
+        'x-image-meta-property-os-type': os_type
     }
     for header, value in headers.iteritems():
         conn.putheader(header, value)
@@ -330,11 +331,13 @@ def upload_vhd(session, args):
     glance_host = params["glance_host"]
     glance_port = params["glance_port"]
     sr_path = params["sr_path"]
+    os_type = params["os_type"]

     staging_path = _make_staging_area(sr_path)
     try:
         _prepare_staging_area_for_upload(sr_path, staging_path, vdi_uuids)
-        _upload_tarball(staging_path, image_id, glance_host, glance_port)
+        _upload_tarball(staging_path, image_id, glance_host, glance_port,
+                        os_type)
     finally:
         _cleanup_staging_area(staging_path)

From 693e4335dbef72317147abd70bdaa10e0d174020 Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Thu, 3 Mar 2011 22:54:11 +0900
Subject: [PATCH 13/76] Fixed based on reviewer's comments.

Main changes are below.

1. Renamed nova.compute.manager.ComputeManager.mktmpfile and its two
   companion methods for better naming.
2. Removed several tests from tests/test_virt.py that only work in a
   libvirt environment; only the db-related test code remains.
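For reviewers, a sketch of how the scheduler drives the renamed trio over
rpc (method names follow the hunks below; nova's db, rpc and FLAGS plus a
security context are assumed to be in scope, and the dest-side file is
created before the try block so that cleanup always has a filename):

    # Sketch only: verify that src and dest mount the same shared storage.
    dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest)
    src_t = db.queue_get_for(context, FLAGS.compute_topic, src)
    # dest creates a tmpfile under FLAGS.instances_path, returns basename
    filename = rpc.call(context, dst_t,
                        {"method": 'create_shared_storage_test_file'})
    try:
        # src can only see the same file if both mount the same storage
        rpc.call(context, src_t,
                 {"method": 'check_shared_storage_test_file',
                  "args": {'filename': filename}})
    finally:
        # always remove the marker file on dest
        rpc.call(context, dst_t,
                 {"method": 'cleanup_shared_storage_test_file',
                  "args": {'filename': filename}})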
---
 nova/compute/manager.py      |   53 +++--
 nova/scheduler/driver.py     |   74 +++----
 nova/scheduler/manager.py    |   18 +-
 nova/tests/test_scheduler.py |   14 +-
 nova/tests/test_virt.py      |  371 +++++++----------------------
 nova/virt/libvirt_conn.py    |   10 +-
 nova/volume/manager.py       |    4 +-
 7 files changed, 170 insertions(+), 374 deletions(-)

diff --git a/nova/compute/manager.py b/nova/compute/manager.py
index d085a0b6a63a..7104daa1e554 100644
--- a/nova/compute/manager.py
+++ b/nova/compute/manager.py
@@ -624,11 +624,12 @@ class ComputeManager(manager.Manager):
         return self.driver.compare_cpu(cpu_info)

     @exception.wrap_exception
-    def mktmpfile(self, context):
+    def create_shared_storage_test_file(self, context):
         """Makes tmpfile under FLAGS.instance_path.

         This method enables compute nodes to recognize that they mounts
-        same shared storage. mktmpfile()/confirm_tmpfile is a pair.
+        same shared storage. (create|check|cleanup)_shared_storage_test_file()
+        work as a set.

         :param context: security context
         :returns: tmpfile name(basename)

         """
         dirpath = FLAGS.instances_path
-        fd, name = tempfile.mkstemp(dir=dirpath)
+        fd, tmp_file = tempfile.mkstemp(dir=dirpath)
         LOG.debug(_("Creating tmpfile %s to notify to other "
-                    "compute node that they mounts same storage.") % name)
+                    "compute nodes that they mount same storage.") % tmp_file)
         os.fdopen(fd, 'w+').close()
-        return os.path.basename(name)
+        return os.path.basename(tmp_file)

     @exception.wrap_exception
-    def confirm_tmpfile(self, context, filename):
-        """Confirms existence of the tmpfile given by path.
+    def check_shared_storage_test_file(self, context, filename):
+        """Confirms existence of the tmpfile under FLAGS.instances_path.

         :param context: security context
         :param filename: confirm existence of FLAGS.instances_path/thisfile
-        :returns: depends on os.remove()

         """
-        p = os.path.join(FLAGS.instances_path, filename)
-        if not os.path.exists(p):
-            raise exception.NotFound(_('%s not found') % p)
-        return os.remove(p)
+        tmp_file = os.path.join(FLAGS.instances_path, filename)
+        if not os.path.exists(tmp_file):
+            raise exception.NotFound(_('%s not found') % tmp_file)
+
+    @exception.wrap_exception
+    def cleanup_shared_storage_test_file(self, context, filename):
+        """Removes the tmpfile under FLAGS.instances_path.
+
+        :param context: security context
+        :param filename: remove FLAGS.instances_path/thisfile
+
+        """
+
+        tmp_file = os.path.join(FLAGS.instances_path, filename)
+        os.remove(tmp_file)

     @exception.wrap_exception
     def update_available_resource(self, context):
@@ -687,7 +698,7 @@ class ComputeManager(manager.Manager):
             raise exception.NotFound(msg % locals())

         # If any volume is mounted, prepare here.
-        if len(instance_ref['volumes']) == 0:
+        if not instance_ref['volumes']:
             LOG.info(_("%s has no volume."), ec2_id)
         else:
             for v in instance_ref['volumes']:
@@ -701,16 +712,16 @@ class ComputeManager(manager.Manager):
         # Retry operation is necessary because continuously request comes,
         # concorrent request occurs to iptables, then it complains.
         max_retry = FLAGS.live_migration_retry_count
-        for i in range(max_retry):
+        for cnt in range(max_retry):
             try:
                 self.network_manager.setup_compute_network(context,
                                                            instance_id)
                 break
-            except exception.ProcessExecutionError, e:
-                if i == max_retry - 1:
+            except exception.ProcessExecutionError:
+                if cnt == max_retry - 1:
                     raise
                 else:
-                    LOG.warn(_("setup_compute_network() failed %(i)d."
+                    LOG.warn(_("setup_compute_network() failed %(cnt)d."
"Retry up to %(max_retry)d for %(ec2_id)s.") % locals()) time.sleep(1) @@ -739,7 +750,7 @@ class ComputeManager(manager.Manager): try: # Checking volume node is working correctly when any volumes # are attached to instances. - if len(instance_ref['volumes']) != 0: + if instance_ref['volumes']: rpc.call(context, FLAGS.volume_topic, {"method": "check_for_export", @@ -751,7 +762,7 @@ class ComputeManager(manager.Manager): {"method": "pre_live_migration", "args": {'instance_id': instance_id}}) - except Exception, e: + except Exception: msg = _("Pre live migration for %(i_name)s failed at %(dest)s") LOG.error(msg % locals()) self.recover_live_migration(context, instance_ref) @@ -843,5 +854,5 @@ class ComputeManager(manager.Manager): 'state': power_state.RUNNING, 'host': host}) - for v in instance_ref['volumes']: - self.db.volume_update(ctxt, v['id'], {'status': 'in-use'}) + for volume in instance_ref['volumes']: + self.db.volume_update(ctxt, volume['id'], {'status': 'in-use'}) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 73ce651dab68..4485ba39fff2 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -100,9 +100,9 @@ class Scheduler(object): 'migrating') # Changing volume state - for v in instance_ref['volumes']: + for volume_ref in instance_ref['volumes']: db.volume_update(context, - v['id'], + volume_ref['id'], {'status': 'migrating'}) # Return value is necessary to send request to src @@ -121,17 +121,16 @@ class Scheduler(object): # Checking instance is running. if (power_state.RUNNING != instance_ref['state'] or \ 'running' != instance_ref['state_description']): - msg = _('Instance(%s) is not running') ec2_id = instance_ref['hostname'] - raise exception.Invalid(msg % ec2_id) + raise exception.Invalid(_('Instance(%s) is not running') % ec2_id) # Checing volume node is running when any volumes are mounted # to the instance. if len(instance_ref['volumes']) != 0: services = db.service_get_all_by_topic(context, 'volume') if len(services) < 1 or not self.service_is_up(services[0]): - msg = _('volume node is not alive(time synchronize problem?)') - raise exception.Invalid(msg) + raise exception.Invalid(_("volume node is not alive" + "(time synchronize problem?)")) # Checking src host exists and compute node src = instance_ref['host'] @@ -139,8 +138,8 @@ class Scheduler(object): # Checking src host is alive. if not self.service_is_up(services[0]): - msg = _('%s is not alive(time synchronize problem?)') - raise exception.Invalid(msg % src) + raise exception.Invalid(_("%s is not alive(time " + "synchronize problem?)") % src) def _live_migration_dest_check(self, context, instance_ref, dest): """Live migration check routine (for destination host). @@ -157,8 +156,8 @@ class Scheduler(object): # Checking dest host is alive. if not self.service_is_up(dservice_ref): - msg = _('%s is not alive(time synchronize problem?)') - raise exception.Invalid(msg % dest) + raise exception.Invalid(_("%s is not alive(time " + "synchronize problem?)") % dest) # Checking whether The host where instance is running # and dest is not same. @@ -170,7 +169,9 @@ class Scheduler(object): % locals()) # Checking dst host still has enough capacities. - self.has_enough_resources(context, instance_ref, dest) + self.assert_compute_node_has_enough_resources(context, + instance_ref, + dest) def _live_migration_common_check(self, context, instance_ref, dest): """Live migration common check routine. 
@@ -202,18 +203,20 @@ class Scheduler(object): oservice_ref = oservice_refs[0]['compute_service'][0] # Checking hypervisor is same. - o = oservice_ref['hypervisor_type'] - d = dservice_ref['hypervisor_type'] - if o != d: + orig_hypervisor = oservice_ref['hypervisor_type'] + dest_hypervisor = dservice_ref['hypervisor_type'] + if orig_hypervisor != dest_hypervisor: raise exception.Invalid(_("Different hypervisor type" - "(%(o)s->%(d)s)')" % locals())) + "(%(orig_hypervisor)s->" + "%(dest_hypervisor)s)')" % locals())) # Checkng hypervisor version. - o = oservice_ref['hypervisor_version'] - d = dservice_ref['hypervisor_version'] - if o > d: - raise exception.Invalid(_('Older hypervisor version(%(o)s->%(d)s)') - % locals()) + orig_hypervisor = oservice_ref['hypervisor_version'] + dest_hypervisor = dservice_ref['hypervisor_version'] + if orig_hypervisor > dest_hypervisor: + raise exception.Invalid(_("Older hypervisor version" + "(%(orig_hypervisor)s->" + "%(dest_hypervisor)s)") % locals()) # Checking cpuinfo. try: @@ -222,14 +225,15 @@ class Scheduler(object): {"method": 'compare_cpu', "args": {'cpu_info': oservice_ref['cpu_info']}}) - except rpc.RemoteError, e: + except rpc.RemoteError: ec2_id = instance_ref['hostname'] src = instance_ref['host'] logging.exception(_("host %(dest)s is not compatible with " "original host %(src)s.") % locals()) raise - def has_enough_resources(self, context, instance_ref, dest): + def assert_compute_node_has_enough_resources(self, context, + instance_ref, dest): """Checks if destination host has enough resource for live migration. Currently, only memory checking has been done. @@ -276,22 +280,24 @@ class Scheduler(object): dst_t = db.queue_get_for(context, FLAGS.compute_topic, dest) src_t = db.queue_get_for(context, FLAGS.compute_topic, src) - # create tmpfile at dest host try: - filename = rpc.call(context, dst_t, {"method": 'mktmpfile'}) - except rpc.RemoteError, e: - msg = _("Cannot create tmpfile at %s to confirm shared storage.") - LOG.error(msg % FLAGS.instances_path) - raise + # create tmpfile at dest host + filename = rpc.call(context, dst_t, + {"method": 'create_shared_storage_test_file'}) - # make sure existence at src host. - try: + # make sure existence at src host. 
            rpc.call(context, src_t,
-                     {"method": 'confirm_tmpfile',
+                     {"method": 'check_shared_storage_test_file',
                      "args": {'filename': filename}})

-        except (rpc.RemoteError, exception.NotFound), e:
+        except rpc.RemoteError:
             ipath = FLAGS.instances_path
-            logging.error(_("Cannot comfirm %(ipath)s at %(dest)s is "
-                            "located at same shared storage.") % locals())
+            logging.error(_("Cannot confirm tmpfile at %(ipath)s is on "
+                            "same shared storage between %(src)s "
+                            "and %(dest)s.") % locals())
             raise
+
+        finally:
+            rpc.call(context, dst_t,
+                     {"method": 'cleanup_shared_storage_test_file',
+                      "args": {'filename': filename}})
diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py
index cd5012fd5c11..a50d3ab201ac 100644
--- a/nova/scheduler/manager.py
+++ b/nova/scheduler/manager.py
@@ -98,24 +98,24 @@ class SchedulerManager(manager.Manager):
         # Getting usage resource information
         usage = {}
         instance_refs = db.instance_get_all_by_host(context,
-                                                   compute_ref['host'])
-        if 0 == len(instance_refs):
+                                                    compute_ref['host'])
+        if not instance_refs:
             return {'resource': resource, 'usage': usage}

         project_ids = [i['project_id'] for i in instance_refs]
         project_ids = list(set(project_ids))
-        for i in project_ids:
+        for project_id in project_ids:
             vcpus = db.instance_get_vcpu_sum_by_host_and_project(context,
                                                                  host,
-                                                                 i)
+                                                                 project_id)
             mem = db.instance_get_memory_sum_by_host_and_project(context,
                                                                  host,
-                                                                 i)
+                                                                 project_id)
             hdd = db.instance_get_disk_sum_by_host_and_project(context,
                                                                host,
-                                                               i)
-            usage[i] = {'vcpus': int(vcpus),
-                        'memory_mb': int(mem),
-                        'local_gb': int(hdd)}
+                                                               project_id)
+            usage[project_id] = {'vcpus': int(vcpus),
+                                 'memory_mb': int(mem),
+                                 'local_gb': int(hdd)}

         return {'resource': resource, 'usage': usage}

diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py
index c4e4d148e451..62db42b115d6 100644
--- a/nova/tests/test_scheduler.py
+++ b/nova/tests/test_scheduler.py
@@ -661,7 +661,6 @@ class SimpleDriverTestCase(test.TestCase):
         self.scheduler.live_migration(self.context, FLAGS.compute_topic,
                                       instance_id=instance_id,
                                       dest=i_ref['host'])
-        self.mox.UnsetStubs()

         i_ref = db.instance_get(self.context, instance_id)
         self.assertTrue(i_ref['state_description'] == 'migrating')
@@ -824,10 +823,15 @@ class SimpleDriverTestCase(test.TestCase):
         topic = FLAGS.compute_topic
         driver.rpc.call(mox.IgnoreArg(),
             db.queue_get_for(self.context, topic, dest),
-            {"method": 'mktmpfile'}).AndReturn(fpath)
+            {"method": 'create_shared_storage_test_file'}).AndReturn(fpath)
         driver.rpc.call(mox.IgnoreArg(),
             db.queue_get_for(mox.IgnoreArg(), topic, i_ref['host']),
-            {"method": 'confirm_tmpfile', "args": {'filename': fpath}})
+            {"method": 'check_shared_storage_test_file',
+             "args": {'filename': fpath}})
+        driver.rpc.call(mox.IgnoreArg(),
+            db.queue_get_for(mox.IgnoreArg(), topic, dest),
+            {"method": 'cleanup_shared_storage_test_file',
+             "args": {'filename': fpath}})

         self.mox.ReplayAll()
         try:
@@ -838,7 +842,6 @@ class SimpleDriverTestCase(test.TestCase):
             c = (e.message.find('does not exist') >= 0)
             self.assertTrue(c)

-        self.mox.UnsetStubs()
         db.instance_destroy(self.context, instance_id)
         db.service_destroy(self.context, s_ref['id'])

@@ -867,7 +870,6 @@ class SimpleDriverTestCase(test.TestCase):
             c = (e.message.find(_('Different hypervisor type')) >= 0)
             self.assertTrue(c)

-        self.mox.UnsetStubs()
         db.instance_destroy(self.context, instance_id)
         db.service_destroy(self.context, s_ref['id'])
         db.service_destroy(self.context, s_ref2['id'])

@@ -898,7 +900,6 @@ class SimpleDriverTestCase(test.TestCase):
             c = (e.message.find(_('Older hypervisor version')) >= 0)
             self.assertTrue(c)

-        self.mox.UnsetStubs()
         db.instance_destroy(self.context, instance_id)
         db.service_destroy(self.context, s_ref['id'])
         db.service_destroy(self.context, s_ref2['id'])

@@ -934,7 +935,6 @@ class SimpleDriverTestCase(test.TestCase):
             c = (e.message.find(_("doesn't have compatibility to")) >= 0)
             self.assertTrue(c)

-        self.mox.UnsetStubs()
         db.instance_destroy(self.context, instance_id)
         db.service_destroy(self.context, s_ref['id'])
         db.service_destroy(self.context, s_ref2['id'])

diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py
index f46b5950e85e..17b80c294887 100644
--- a/nova/tests/test_virt.py
+++ b/nova/tests/test_virt.py
@@ -14,7 +14,6 @@
 # License for the specific language governing permissions and limitations
 # under the License.

-import libvirt
 import mox

 from xml.etree.ElementTree import fromstring as xml_to_tree
@@ -60,6 +59,7 @@ class LibvirtConnTestCase(test.TestCase):
                                              admin=True)
         self.project = self.manager.create_project('fake', 'fake', 'fake')
         self.network = utils.import_object(FLAGS.network_manager)
+        self.context = context.get_admin_context()
         FLAGS.instances_path = ''
         self.call_libvirt_dependant_setup = False
@@ -73,22 +73,52 @@ class LibvirtConnTestCase(test.TestCase):
                     'bridge': 'br101',
                     'instance_type': 'm1.small'}

-    def libvirt_dependant_setup(self):
-        """A setup method of LibvirtConnection dependent test."""
-        # try to connect libvirt. if fail, skip test.
-        self.call_libvirt_dependant_setup = True
-        try:
-            libvirt.openReadOnly('qemu:///system')
-        except libvirt.libvirtError:
-            return
-        return libvirt_conn.get_connection(False)
+    def create_fake_libvirt_mock(self, **kwargs):
+        """Defining mocks for LibvirtConnection (libvirt is not used)."""

-    def libvirt_dependant_teardown(self):
-        """teardown method of LibvirtConnection dependent test."""
-        if self.call_libvirt_dependant_setup:
-            libvirt_conn.libvirt = None
-            libvirt_conn.libxml2 = None
-        self.call_libvirt_dependant_setup = False
+        # A fake libvirt.virConnect
+        class FakeLibvirtConnection(object):
+            def getVersion(self):
+                return 12003
+
+            def getType(self):
+                return 'qemu'
+
+            def getCapabilities(self):
+                return 'qemu'
+
+            def listDomainsID(self):
+                return []
+
+        # A fake libvirt_conn.IptablesFirewallDriver
+        class FakeIptablesFirewallDriver(object):
+            def __init__(self, **kwargs):
+                pass
+
+        # Creating mocks
+        fake = FakeLibvirtConnection()
+        fakeip = FakeIptablesFirewallDriver
+        # Customizing above fake if necessary
+        for key, val in kwargs.items():
+            fake.__setattr__(key, val)
+
+        # Inevitable mocks for libvirt_conn.LibvirtConnection
+        self.mox.StubOutWithMock(libvirt_conn.utils, 'import_class')
+        libvirt_conn.utils.import_class(mox.IgnoreArg()).AndReturn(fakeip)
+        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
+        libvirt_conn.LibvirtConnection._conn = fake
+
+    def create_service(self, **kwargs):
+        service_ref = {'host': kwargs.get('host', 'dummy'),
+                       'binary': 'nova-compute',
+                       'topic': 'compute',
+                       'report_count': 0,
+                       'availability_zone': 'zone'}
+
+        return db.service_create(context.get_admin_context(), service_ref)

     def test_xml_and_uri_no_ramdisk_no_kernel(self):
         instance_data = dict(self.test_instance)
@@ -244,306 +274,55 @@ class LibvirtConnTestCase(test.TestCase):
         self.assertEquals(uri, testuri)
         db.instance_destroy(user_context, instance_ref['id'])

-    def test_get_vcpu_used(self):
-        """Check if get_local_gb_total returns appropriate disk value."""
-
self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - libvirt_conn.LibvirtConnection._conn.listDomainsID().AndReturn([1, 2]) - vdmock = self.mox.CreateMock(libvirt.virDomain) - self.mox.StubOutWithMock(vdmock, "vcpus") - vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]]) - vdmock.vcpus().AndReturn(['', [('dummycpu'), ('dummycpu')]]) - arg = mox.IgnoreArg() - libvirt_conn.LibvirtConnection._conn.lookupByID(arg).AndReturn(vdmock) - libvirt_conn.LibvirtConnection._conn.lookupByID(arg).AndReturn(vdmock) + def test_update_available_resource_works_correctly(self): + """Confirm compute_service table is updated successfully.""" + org_path = FLAGS.instances_path = '' + FLAGS.instances_path = '.' + + service_ref = self.create_service(host='dummy') + self.create_fake_libvirt_mock() + self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, + 'get_cpu_info') + libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo') self.mox.ReplayAll() conn = libvirt_conn.LibvirtConnection(False) - self.assertTrue(conn.get_vcpu_used() == 4) - - def test_get_cpu_info_inappropreate_xml(self): - """Raise exception if given xml is inappropriate.""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - xml = """ - x86_64 - Nehalem - Intel - - - - - - - - - - - - - - - - - - """ - - self.mox.StubOutWithMock(conn._conn, 'getCapabilities') - conn._conn.getCapabilities().AndReturn(xml) - - self.mox.ReplayAll() - try: - conn.get_cpu_info() - except exception.Invalid, e: - c1 = (0 <= e.message.find('Invalid xml')) - self.assertTrue(c1) - - def test_get_cpu_info_inappropreate_xml2(self): - """Raise exception if given xml is inappropriate(topology tag).""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - xml = """ - x86_64 - Nehalem - Intel - - - - - - - - - - - - - - - - - """ - self.mox.StubOutWithMock(conn._conn, 'getCapabilities') - conn._conn.getCapabilities().AndReturn(xml) - - self.mox.ReplayAll() - try: - conn.get_cpu_info() - except exception.Invalid, e: - c1 = (0 <= e.message.find('Invalid xml: topology')) - self.assertTrue(c1) - - def test_update_available_resource_works_correctly(self): - """Confirm compute_service table is updated successfully.""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - host = 'dummy' - zone = 'dummyzone' - ctxt = context.get_admin_context() - org_path = FLAGS.instances_path = '' - FLAGS.instances_path = '.' - - service_ref = db.service_create(ctxt, - {'host': host, - 'binary': 'nova-compute', - 'topic': 'compute', - 'report_count': 0, - 'availability_zone': zone}) - conn.update_available_resource(ctxt, host) - - service_ref = db.service_get(ctxt, service_ref['id']) - print service_ref['compute_service'] + conn.update_available_resource(self.context, 'dummy') + service_ref = db.service_get(self.context, service_ref['id']) compute_service = service_ref['compute_service'][0] + c1 = (compute_service['vcpus'] > 0) c2 = (compute_service['memory_mb'] > 0) c3 = (compute_service['local_gb'] > 0) - # vcpu_used is checked at test_get_vcpu_used. 
- c4 = (compute_service['memory_mb_used'] > 0) - c5 = (compute_service['local_gb_used'] > 0) - c6 = (len(compute_service['hypervisor_type']) > 0) - c7 = (compute_service['hypervisor_version'] > 0) + c4 = (compute_service['vcpus_used'] == 0) + c5 = (compute_service['memory_mb_used'] > 0) + c6 = (compute_service['local_gb_used'] > 0) + c7 = (len(compute_service['hypervisor_type']) > 0) + c8 = (compute_service['hypervisor_version'] > 0) - self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7) + self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7 and c8) - db.service_destroy(ctxt, service_ref['id']) + db.service_destroy(self.context, service_ref['id']) FLAGS.instances_path = org_path - def test_update_resource_info_raise_exception(self): + def test_update_resource_info_no_compute_record_found(self): """Raise exception if no recorde found on services table.""" - host = 'dummy' org_path = FLAGS.instances_path = '' FLAGS.instances_path = '.' - try: - conn = libvirt_conn.LibvirtConnection(False) - conn.update_available_resource(context.get_admin_context(), host) - except exception.Invalid, e: - msg = 'Cannot update compute manager specific info' - c1 = (0 <= e.message.find(msg)) - self.assertTrue(c1) + self.create_fake_libvirt_mock() + + self.mox.ReplayAll() + conn = libvirt_conn.LibvirtConnection(False) + self.assertRaises(exception.Invalid, + conn.update_available_resource, + self.context, 'dummy') + FLAGS.instances_path = org_path - def test_compare_cpu_works_correctly(self): - """Calling libvirt.compute_cpu() and works correctly.""" - conn = self.libvirt_dependant_setup() - if not conn: - return - host = 'dummy' - zone = 'dummyzone' - ctxt = context.get_admin_context() - org_path = FLAGS.instances_path = '' - FLAGS.instances_path = '.' - - service_ref = db.service_create(ctxt, - {'host': host, - 'binary': 'nova-compute', - 'topic': 'compute', - 'report_count': 0, - 'availability_zone': zone}) - conn.update_available_resource(ctxt, host) - service_ref = db.service_get(ctxt, service_ref['id']) - ret = conn.compare_cpu(service_ref['compute_service'][0]['cpu_info']) - self.assertTrue(ret == None) - - db.service_destroy(ctxt, service_ref['id']) - FLAGS.instances_path = org_path - - def test_compare_cpu_no_compatibility(self): - """Libvirt.compare_cpu() return less than 0.(no compatibility).""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - t = {} - t['arch'] = 'x86' - t['model'] = 'model' - t['vendor'] = 'Intel' - t['topology'] = {'cores': "2", "threads": "1", "sockets": "4"} - t['features'] = ["tm"] - cpu_info = utils.dumps(t) - self.mox.StubOutWithMock(conn._conn, 'compareCPU') - conn._conn.compareCPU(mox.IgnoreArg(), 0).AndReturn(0) - - self.mox.ReplayAll() - self.assertRaises(exception.Invalid, conn.compare_cpu, cpu_info) - - def test_ensure_filtering_rules_for_instance_works_correctly(self): - """ensure_filtering_rules_for_instance() works successfully.""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - instance_ref = models.Instance() - instance_ref.__setitem__('id', 1) - fwdriver = conn.firewall_driver - - self.mox.StubOutWithMock(fwdriver, 'setup_basic_filtering') - fwdriver.setup_basic_filtering(instance_ref) - self.mox.StubOutWithMock(fwdriver, 'prepare_instance_filter') - fwdriver.prepare_instance_filter(instance_ref) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - n = 'nova-instance-%s' % instance_ref.name - conn._conn.nwfilterLookupByName(n) - - self.mox.ReplayAll() - 
conn.ensure_filtering_rules_for_instance(instance_ref) - - def test_ensure_filtering_rules_for_instance_timeout(self): - """ensure_filtering_fules_for_instance() finishes with timeout.""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - instance_ref = models.Instance() - instance_ref.__setitem__('id', 1) - fwdriver = conn.firewall_driver - - self.mox.StubOutWithMock(fwdriver, 'setup_basic_filtering') - fwdriver.setup_basic_filtering(instance_ref) - self.mox.StubOutWithMock(fwdriver, 'prepare_instance_filter') - fwdriver.prepare_instance_filter(instance_ref) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - n = 'nova-instance-%s' % instance_ref.name - for i in range(FLAGS.live_migration_retry_count): - conn._conn.nwfilterLookupByName(n).\ - AndRaise(libvirt.libvirtError('ERR')) - - self.mox.ReplayAll() - try: - conn.ensure_filtering_rules_for_instance(instance_ref) - except exception.Error, e: - c1 = (0 <= e.message.find('Timeout migrating for')) - self.assertTrue(c1) - - def test_live_migration_works_correctly(self): - """_live_migration() works as expected correctly.""" - conn = self.libvirt_dependant_setup() - if not conn: - return - - class dummyCall(object): - f = None - - def start(self, interval=0, now=False): - pass - - i_ref = models.Instance() - i_ref.__setitem__('id', 1) - ctxt = context.get_admin_context() - - vdmock = self.mox.CreateMock(libvirt.virDomain) - self.mox.StubOutWithMock(vdmock, "migrateToURI") - vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest', - mox.IgnoreArg(), - None, FLAGS.live_migration_bandwidth).\ - AndReturn(None) - self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn') - conn._conn.lookupByName(i_ref.name).AndReturn(vdmock) - self.mox.StubOutWithMock(libvirt_conn.utils, 'LoopingCall') - libvirt_conn.utils.LoopingCall(f=None).AndReturn(dummyCall()) - - self.mox.ReplayAll() - # Nothing to do with setting post_method/recover_method or not. 
-        ret = conn._live_migration(ctxt, i_ref, 'dest', '', '')
-        self.assertTrue(ret == None)
-
-    def test_live_migration_raises_exception(self):
-        """Confirms recover method is called when exceptions are raised."""
-        conn = self.libvirt_dependant_setup()
-        if not conn:
-            return
-
-        i_ref = models.Instance()
-        i_ref.__setitem__('id', 1)
-        ctxt = context.get_admin_context()
-
-        def dummy_recover_method(c, instance, host=None):
-            pass
-
-        vdmock = self.mox.CreateMock(libvirt.virDomain)
-        self.mox.StubOutWithMock(vdmock, "migrateToURI")
-        vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
-                            mox.IgnoreArg(),
-                            None, FLAGS.live_migration_bandwidth).\
-                            AndRaise(libvirt.libvirtError('ERR'))
-        self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection, '_conn')
-        conn._conn.lookupByName(i_ref.name).AndReturn(vdmock)
-
-        self.mox.ReplayAll()
-        self.assertRaises(libvirt.libvirtError,
-                          conn._live_migration,
-                          ctxt, i_ref, 'dest',
-                          '', dummy_recover_method)
-
     def tearDown(self):
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
         super(LibvirtConnTestCase, self).tearDown()
-        self.libvirt_dependant_teardown()


 class IptablesFirewallTestCase(test.TestCase):

diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 75e4f0a533d4..70fdcc4534a0 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -891,8 +891,8 @@ class LibvirtConnection(object):

         """
         total = 0
-        for i in self._conn.listDomainsID():
-            dom = self._conn.lookupByID(i)
+        for dom_id in self._conn.listDomainsID():
+            dom = self._conn.lookupByID(dom_id)
             total += len(dom.vcpus()[1])
         return total

@@ -1048,7 +1048,7 @@ class LibvirtConnection(object):
                'cpu_info': self.get_cpu_info()}

         compute_service_ref = service_ref['compute_service']
-        if len(compute_service_ref) == 0:
+        if not compute_service_ref:
             LOG.info(_('Compute_service record is created for %s ') % host)
             dic['service_id'] = service_ref['id']
             db.compute_service_create(ctxt, dic)
@@ -1124,7 +1124,7 @@ class LibvirtConnection(object):

         # wait for completion
         timeout_count = range(FLAGS.live_migration_retry_count)
-        while len(timeout_count) != 0:
+        while timeout_count:
             try:
                 filter_name = 'nova-instance-%s' % instance_ref.name
                 self._conn.nwfilterLookupByName(filter_name)
@@ -1198,7 +1198,7 @@ class LibvirtConnection(object):
                                         None,
                                         FLAGS.live_migration_bandwidth)

-        except Exception, e:
+        except Exception:
             recover_method(ctxt, instance_ref)
             raise

diff --git a/nova/volume/manager.py b/nova/volume/manager.py
index 5dc9077b41a1..9dea35b35f14 100644
--- a/nova/volume/manager.py
+++ b/nova/volume/manager.py
@@ -175,5 +175,5 @@ class VolumeManager(manager.Manager):
     def check_for_export(self, context, instance_id):
         """Make sure whether volume is exported."""
         instance_ref = self.db.instance_get(context, instance_id)
-        for v in instance_ref['volumes']:
-            self.driver.check_for_export(context, v['id'])
+        for volume in instance_ref['volumes']:
+            self.driver.check_for_export(context, volume['id'])

From 6797c5acc47fb5111ef821d6b074cb635692a9fb Mon Sep 17 00:00:00 2001
From: Monsyne Dragon
Date: Thu, 3 Mar 2011 15:41:45 +0000
Subject: [PATCH 14/76] Add in multi-tenant support in openstack api.
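For reviewers, a sketch of the login round trip this patch enables (the
account:user form and the account-scoped management URL come from the
auth.py changes below; header names follow the existing v1.0 auth
contract, while host, paths and credentials are illustrative):

    # Sketch only: authenticate against a specific account (project).
    import httplib

    conn = httplib.HTTPConnection('foo.org', 8774)
    conn.request('GET', '/v1.0/',
                 headers={'X-Auth-User': 'myacct:alice',
                          'X-Auth-Key': 'alice-api-key'})
    resp = conn.getresponse()
    token = resp.getheader('X-Auth-Token')
    # X-Server-Management-Url now embeds the account id, e.g.
    #     http://foo.org:8774/v1.0/myacct/
    # so later calls look like GET /v1.0/myacct/servers/detail.
    # A bare username still works when the user belongs to exactly one
    # account; otherwise the account prefix is required.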
--- bin/nova-manage | 3 + nova/api/openstack/__init__.py | 25 ++++ nova/api/openstack/accounts.py | 73 +++++++++ nova/api/openstack/auth.py | 54 ++++++- nova/api/openstack/backup_schedules.py | 6 +- nova/api/openstack/consoles.py | 10 +- nova/api/openstack/flavors.py | 6 +- nova/api/openstack/images.py | 12 +- nova/api/openstack/servers.py | 38 ++--- nova/api/openstack/shared_ip_groups.py | 12 +- nova/api/openstack/users.py | 93 ++++++++++++ nova/api/openstack/zones.py | 12 +- nova/auth/novarc.template | 2 +- nova/db/sqlalchemy/api.py | 3 + nova/tests/api/openstack/fakes.py | 82 ++++++++++- nova/tests/api/openstack/test_accounts.py | 123 ++++++++++++++++ nova/tests/api/openstack/test_adminapi.py | 6 +- nova/tests/api/openstack/test_auth.py | 17 ++- nova/tests/api/openstack/test_flavors.py | 4 +- nova/tests/api/openstack/test_images.py | 6 +- nova/tests/api/openstack/test_servers.py | 51 +++---- nova/tests/api/openstack/test_users.py | 139 ++++++++++++++++++ nova/tests/api/openstack/test_zones.py | 12 +- .../xenapi/etc/xapi.d/plugins/glance | 2 +- 24 files changed, 688 insertions(+), 103 deletions(-) create mode 100644 nova/api/openstack/accounts.py create mode 100644 nova/api/openstack/users.py create mode 100644 nova/tests/api/openstack/test_accounts.py create mode 100644 nova/tests/api/openstack/test_users.py diff --git a/bin/nova-manage b/bin/nova-manage index 89332f2aff65..8f8f0a6a83e7 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -433,6 +433,8 @@ class ProjectCommands(object): "been created.\nPlease create a database by running a " "nova-api server on this host.") +AccountCommands = ProjectCommands + class FixedIpCommands(object): """Class for managing fixed ip.""" @@ -663,6 +665,7 @@ class VolumeCommands(object): CATEGORIES = [ ('user', UserCommands), + ('account', AccountCommands), ('project', ProjectCommands), ('role', RoleCommands), ('shell', ShellCommands), diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index b1b38ed2d017..73d52192e47c 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -27,6 +27,7 @@ import webob.exc from nova import flags from nova import log as logging from nova import wsgi +from nova.api.openstack import accounts from nova.api.openstack import faults from nova.api.openstack import backup_schedules from nova.api.openstack import consoles @@ -34,6 +35,7 @@ from nova.api.openstack import flavors from nova.api.openstack import images from nova.api.openstack import servers from nova.api.openstack import shared_ip_groups +from nova.api.openstack import users from nova.api.openstack import zones @@ -71,6 +73,18 @@ class APIRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() + accounts_controller = accounts.Controller() + mapper.connect("account", "/{id}", + controller=accounts_controller, action="show", + conditions=dict(method=["GET"])) + if FLAGS.allow_admin_api: + mapper.connect("/{id}", + controller=accounts_controller, action="update", + conditions=dict(method=["PUT"])) + mapper.connect("/{id}", + controller=accounts_controller, action="delete", + conditions=dict(method=["DELETE"])) + server_members = {'action': 'POST'} if FLAGS.allow_admin_api: LOG.debug(_("Including admin operations in API.")) @@ -84,27 +98,38 @@ class APIRouter(wsgi.Router): server_members['inject_network_info'] = 'POST' mapper.resource("zone", "zones", controller=zones.Controller(), + path_prefix="{account_id}/", + collection={'detail': 'GET'}) + + mapper.resource("user", "users", 
controller=users.Controller(), + path_prefix="{account_id}/", collection={'detail': 'GET'}) mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, + path_prefix="{account_id}/", member=server_members) mapper.resource("backup_schedule", "backup_schedule", controller=backup_schedules.Controller(), + path_prefix="{account_id}/servers/{server_id}/", parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("console", "consoles", controller=consoles.Controller(), + path_prefix="{account_id}/servers/{server_id}/", parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("image", "images", controller=images.Controller(), + path_prefix="{account_id}/", collection={'detail': 'GET'}) mapper.resource("flavor", "flavors", controller=flavors.Controller(), + path_prefix="{account_id}/", collection={'detail': 'GET'}) mapper.resource("shared_ip_group", "shared_ip_groups", + path_prefix="{account_id}/", collection={'detail': 'GET'}, controller=shared_ip_groups.Controller()) diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py new file mode 100644 index 000000000000..264fdab99c45 --- /dev/null +++ b/nova/api/openstack/accounts.py @@ -0,0 +1,73 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import common + +from nova import exception +from nova import flags +from nova import log as logging +from nova import wsgi + +from nova.auth import manager + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.api.openstack') + + +def _translate_keys(account): + return dict(id=account.id, + name=account.name, + description=account.description, + manager=account.project_manager_id) + + +class Controller(wsgi.Controller): + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "account": ["id", "name", "description", "manager"]}}} + + def __init__(self): + self.manager = manager.AuthManager() + + def _check_admin(self, context): + """ We cannot depend on the db layer to check for admin access + for the auth manager, so we do it here """ + if not context.is_admin: + raise exception.NotAuthorized("Not admin user.") + + def show(self, req, id): + """Return data about the given account id""" + account = self.manager.get_project(id) + return dict(account=_translate_keys(account)) + + def delete(self, req, id): + self._check_admin(req.environ['nova.context']) + self.manager.delete_project(id) + return {} + + def update(self, req, id): + """ This is really create or update. 
""" + self._check_admin(req.environ['nova.context']) + env = self._deserialize(req.body, req) + description = env['account'].get('description') + manager = env['account'].get('manager') + try: + account = self.manager.get_project(id) + self.manager.modify_project(id, manager, description) + except exception.NotFound: + account = self.manager.create_project(id, manager, description) + return dict(account=_translate_keys(account)) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 6011e6115694..e77910fed1e6 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -28,11 +28,13 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import utils from nova import wsgi from nova.api.openstack import faults +LOG = logging.getLogger('nova.api.openstack') FLAGS = flags.FLAGS @@ -50,14 +52,27 @@ class AuthMiddleware(wsgi.Middleware): def __call__(self, req): if not self.has_authentication(req): return self.authenticate(req) - user = self.get_user_by_authentication(req) + account_name = req.path_info_peek() if not user: return faults.Fault(webob.exc.HTTPUnauthorized()) - project = self.auth.get_project(FLAGS.default_project) - req.environ['nova.context'] = context.RequestContext(user, project) + if not account_name: + if self.auth.is_admin(user): + account_name = FLAGS.default_project + else: + return faults.Fault(webob.exc.HTTPUnauthorized()) + try: + account = self.auth.get_project(account_name) + except exception.NotFound: + return faults.Fault(webob.exc.HTTPUnauthorized()) + + if not self.auth.is_admin(user) and \ + not self.auth.is_project_member(user, account): + return faults.Fault(webob.exc.HTTPUnauthorized()) + + req.environ['nova.context'] = context.RequestContext(user, account) return self.application def has_authentication(self, req): @@ -70,6 +85,7 @@ class AuthMiddleware(wsgi.Middleware): # Unless the request is explicitly made against // don't # honor it path_info = req.path_info + account_name = None if len(path_info) > 1: return faults.Fault(webob.exc.HTTPUnauthorized()) @@ -79,7 +95,10 @@ class AuthMiddleware(wsgi.Middleware): except KeyError: return faults.Fault(webob.exc.HTTPUnauthorized()) - token, user = self._authorize_user(username, key, req) + if ':' in username: + account_name, username = username.rsplit(':', 1) + + token, user = self._authorize_user(username, account_name, key, req) if user and token: res = webob.Response() res.headers['X-Auth-Token'] = token.token_hash @@ -116,23 +135,44 @@ class AuthMiddleware(wsgi.Middleware): return self.auth.get_user(token.user_id) return None - def _authorize_user(self, username, key, req): + def _authorize_user(self, username, account_name, key, req): """Generates a new token and assigns it to a user. username - string + account_name - string key - string API key req - webob.Request object """ ctxt = context.get_admin_context() user = self.auth.get_user_from_access_key(key) + if account_name: + try: + account = self.auth.get_project(account_name) + except exception.NotFound: + return None, None + else: + # (dragondm) punt and try to determine account. + # this is something of a hack, but a user on 1 account is a + # common case, and is the way the current RS code works. + accounts = self.auth.get_projects(user=user) + if len(accounts) == 1: + account = accounts[0] + else: + #we can't tell what account they are logging in for. 
+ return None, None + if user and user.name == username: token_hash = hashlib.sha1('%s%s%f' % (username, key, time.time())).hexdigest() token_dict = {} token_dict['token_hash'] = token_hash token_dict['cdn_management_url'] = '' - # Same as auth url, e.g. http://foo.org:8774/baz/v1.0 - token_dict['server_management_url'] = req.url + # auth url + project (account) id, e.g. + # http://foo.org:8774/baz/v1.0/myacct/ + os_url = '%s%s%s/' % (req.url, + '' if req.url.endswith('/') else '/', + account.id) + token_dict['server_management_url'] = os_url token_dict['storage_url'] = '' token_dict['user_id'] = user.id token = self.db.auth_token_create(ctxt, token_dict) diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index 7abb5f884d46..a4d5939df6ee 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -40,15 +40,15 @@ class Controller(wsgi.Controller): def __init__(self): pass - def index(self, req, server_id): + def index(self, req, server_id, **kw): """ Returns the list of backup schedules for a given instance """ return _translate_keys({}) - def create(self, req, server_id): + def create(self, req, server_id, **kw): """ No actual update method required, since the existing API allows both create and update through a POST """ return faults.Fault(exc.HTTPNotImplemented()) - def delete(self, req, server_id, id): + def delete(self, req, server_id, id, **kw): """ Deletes an existing backup schedule """ return faults.Fault(exc.HTTPNotImplemented()) diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 9ebdbe710ad6..85b2a4140621 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -55,7 +55,7 @@ class Controller(wsgi.Controller): self.console_api = console.API() super(Controller, self).__init__() - def index(self, req, server_id): + def index(self, req, server_id, **kw): """Returns a list of consoles for this instance""" consoles = self.console_api.get_consoles( req.environ['nova.context'], @@ -63,14 +63,14 @@ class Controller(wsgi.Controller): return dict(consoles=[_translate_keys(console) for console in consoles]) - def create(self, req, server_id): + def create(self, req, server_id, **kw): """Creates a new console""" #info = self._deserialize(req.body, req) self.console_api.create_console( req.environ['nova.context'], int(server_id)) - def show(self, req, server_id, id): + def show(self, req, server_id, id, **kw): """Shows in-depth information on a specific console""" try: console = self.console_api.get_console( @@ -81,11 +81,11 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return _translate_detail_keys(console) - def update(self, req, server_id, id): + def update(self, req, server_id, id, **kw): """You can't update a console""" raise faults.Fault(exc.HTTPNotImplemented()) - def delete(self, req, server_id, id): + def delete(self, req, server_id, id, **kw): """Deletes a console""" try: self.console_api.delete_console(req.environ['nova.context'], diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index f620d4107291..79c3e1ab3f1d 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -32,18 +32,18 @@ class Controller(wsgi.Controller): "attributes": { "flavor": ["id", "name", "ram", "disk"]}}} - def index(self, req): + def index(self, req, **kw): """Return all flavors in brief.""" return dict(flavors=[dict(id=flavor['id'], name=flavor['name']) for flavor in 
self.detail(req)['flavors']]) - def detail(self, req): + def detail(self, req, **kw): """Return all flavors in detail.""" items = [self.show(req, id)['flavor'] for id in self._all_ids()] items = common.limited(items, req) return dict(flavors=items) - def show(self, req, id): + def show(self, req, id, **kw): """Return data about the given flavor id.""" for name, val in instance_types.INSTANCE_TYPES.iteritems(): if val['flavorid'] == int(id): diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index cf85a496f017..5bc5b9978767 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -115,14 +115,14 @@ class Controller(wsgi.Controller): def __init__(self): self._service = utils.import_object(FLAGS.image_service) - def index(self, req): + def index(self, req, **kw): """Return all public images in brief""" items = self._service.index(req.environ['nova.context']) items = common.limited(items, req) items = [_filter_keys(item, ('id', 'name')) for item in items] return dict(images=items) - def detail(self, req): + def detail(self, req, **kw): """Return all public images in detail""" try: items = self._service.detail(req.environ['nova.context']) @@ -136,7 +136,7 @@ class Controller(wsgi.Controller): items = [_translate_status(item) for item in items] return dict(images=items) - def show(self, req, id): + def show(self, req, id, **kw): """Return data about the given image id""" image_id = common.get_image_id_from_image_hash(self._service, req.environ['nova.context'], id) @@ -145,11 +145,11 @@ class Controller(wsgi.Controller): _convert_image_id_to_hash(image) return dict(image=image) - def delete(self, req, id): + def delete(self, req, id, **kw): # Only public images are supported for now. raise faults.Fault(exc.HTTPNotFound()) - def create(self, req): + def create(self, req, **kw): context = req.environ['nova.context'] env = self._deserialize(req.body, req) instance_id = env["image"]["serverId"] @@ -160,7 +160,7 @@ class Controller(wsgi.Controller): return dict(image=image_meta) - def update(self, req, id): + def update(self, req, id, **kw): # Users may not modify public images, and that's all that # we support for now. 
raise faults.Fault(exc.HTTPNotFound()) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 69273ad7b09f..426de92bec0f 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -105,11 +105,11 @@ class Controller(wsgi.Controller): self._image_service = utils.import_object(FLAGS.image_service) super(Controller, self).__init__() - def index(self, req): + def index(self, req, **kw): """ Returns a list of server names and ids for a given user """ return self._items(req, entity_maker=_translate_keys) - def detail(self, req): + def detail(self, req, **kw): """ Returns a list of server details for a given user """ return self._items(req, entity_maker=_translate_detail_keys) @@ -123,7 +123,7 @@ class Controller(wsgi.Controller): res = [entity_maker(inst)['server'] for inst in limited_list] return dict(servers=res) - def show(self, req, id): + def show(self, req, id, **kw): """ Returns server details by server id """ try: instance = self.compute_api.get(req.environ['nova.context'], id) @@ -131,7 +131,7 @@ class Controller(wsgi.Controller): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - def delete(self, req, id): + def delete(self, req, id, **kw): """ Destroys a server """ try: self.compute_api.delete(req.environ['nova.context'], id) @@ -139,7 +139,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() - def create(self, req): + def create(self, req, **kw): """ Creates a new server for a given user """ env = self._deserialize(req.body, req) if not env: @@ -180,7 +180,7 @@ class Controller(wsgi.Controller): onset_files=env.get('onset_files', [])) return _translate_keys(instances[0]) - def update(self, req, id): + def update(self, req, id, **kw): """ Updates the server name or password """ inst_dict = self._deserialize(req.body, req) if not inst_dict: @@ -202,7 +202,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPNoContent() - def action(self, req, id): + def action(self, req, id, **kw): """ Multi-purpose method used to reboot, rebuild, and resize a server """ input_dict = self._deserialize(req.body, req) @@ -219,7 +219,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def lock(self, req, id): + def lock(self, req, id, **kw): """ lock the instance with id admin only operation @@ -234,7 +234,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def unlock(self, req, id): + def unlock(self, req, id, **kw): """ unlock the instance with id admin only operation @@ -249,7 +249,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def get_lock(self, req, id): + def get_lock(self, req, id, **kw): """ return the boolean state of (instance with id)'s lock @@ -263,7 +263,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def reset_network(self, req, id): + def reset_network(self, req, id, **kw): """ Reset networking on an instance (admin only). @@ -277,7 +277,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def inject_network_info(self, req, id): + def inject_network_info(self, req, id, **kw): """ Inject network info for an instance (admin only). 
@@ -291,7 +291,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def pause(self, req, id): + def pause(self, req, id, **kw): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] try: @@ -302,7 +302,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def unpause(self, req, id): + def unpause(self, req, id, **kw): """ Permit Admins to Unpause the server. """ ctxt = req.environ['nova.context'] try: @@ -313,7 +313,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def suspend(self, req, id): + def suspend(self, req, id, **kw): """permit admins to suspend the server""" context = req.environ['nova.context'] try: @@ -324,7 +324,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def resume(self, req, id): + def resume(self, req, id, **kw): """permit admins to resume the server from suspend""" context = req.environ['nova.context'] try: @@ -335,7 +335,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def get_ajax_console(self, req, id): + def get_ajax_console(self, req, id, **kw): """ Returns a url to an instance's ajaxterm console. """ try: self.compute_api.get_ajax_console(req.environ['nova.context'], @@ -344,12 +344,12 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() - def diagnostics(self, req, id): + def diagnostics(self, req, id, **kw): """Permit Admins to retrieve server diagnostics.""" ctxt = req.environ["nova.context"] return self.compute_api.get_diagnostics(ctxt, id) - def actions(self, req, id): + def actions(self, req, id, **kw): """Permit Admins to retrieve server actions.""" ctxt = req.environ["nova.context"] items = self.compute_api.get_actions(ctxt, id) diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index 5d78f93777db..e3c9177494d6 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -40,26 +40,26 @@ class Controller(wsgi.Controller): 'attributes': { 'sharedIpGroup': []}}} - def index(self, req): + def index(self, req, **kw): """ Returns a list of Shared IP Groups for the user """ return dict(sharedIpGroups=[]) - def show(self, req, id): + def show(self, req, id, **kw): """ Shows in-depth information on a specific Shared IP Group """ return _translate_keys({}) - def update(self, req, id): + def update(self, req, id, **kw): """ You can't update a Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) - def delete(self, req, id): + def delete(self, req, id, **kw): """ Deletes a Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) - def detail(self, req): + def detail(self, req, **kw): """ Returns a complete list of Shared IP Groups """ return _translate_detail_keys({}) - def create(self, req): + def create(self, req, **kw): """ Creates a new Shared IP group """ raise faults.Fault(exc.HTTPNotImplemented()) diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py new file mode 100644 index 000000000000..c0b7544f953c --- /dev/null +++ b/nova/api/openstack/users.py @@ -0,0 +1,93 @@ +# Copyright 2011 OpenStack LLC. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import common + +from nova import exception +from nova import flags +from nova import log as logging +from nova import wsgi + +from nova.auth import manager + +FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.api.openstack') + + +def _translate_keys(user): + return dict(id=user.id, + name=user.name, + access=user.access, + secret=user.secret, + admin=user.admin) + + +class Controller(wsgi.Controller): + + _serialization_metadata = { + 'application/xml': { + "attributes": { + "user": ["id", "name", "access", "secret", "admin"]}}} + + def __init__(self): + self.manager = manager.AuthManager() + + def _check_admin(self, context): + """ We cannot depend on the db layer to check for admin access + for the auth manager, so we do it here """ + if not context.is_admin: + raise exception.NotAuthorized("Not admin user") + + def index(self, req, **kw): + """Return all users in brief""" + users = self.manager.get_users() + users = common.limited(users, req) + users = [_translate_keys(user) for user in users] + return dict(users=users) + + def detail(self, req, **kw): + """Return all users in detail""" + return self.index(req) + + def show(self, req, id, **kw): + """Return data about the given user id""" + user = self.manager.get_user(id) + return dict(user=_translate_keys(user)) + + def delete(self, req, id, **kw): + self._check_admin(req.environ['nova.context']) + self.manager.delete_user(id) + return {} + + def create(self, req, **kw): + self._check_admin(req.environ['nova.context']) + env = self._deserialize(req.body, req) + is_admin = env['user'].get('admin') in ('T', 'True', True) + name = env['user'].get('name') + access = env['user'].get('access') + secret = env['user'].get('secret') + user = self.manager.create_user(name, access, secret, is_admin) + return dict(user=_translate_keys(user)) + + def update(self, req, id, **kw): + self._check_admin(req.environ['nova.context']) + env = self._deserialize(req.body, req) + is_admin = env['user'].get('admin') + if is_admin is not None: + is_admin = is_admin in ('T', 'True', True) + access = env['user'].get('access') + secret = env['user'].get('secret') + self.manager.modify_user(id, access, secret, is_admin) + return dict(user=_translate_keys(self.manager.get_user(id))) diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index d5206da20927..30bf2b67bc2b 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -43,35 +43,35 @@ class Controller(wsgi.Controller): "attributes": { "zone": ["id", "api_url"]}}} - def index(self, req): + def index(self, req, **kw): """Return all zones in brief""" items = db.zone_get_all(req.environ['nova.context']) items = common.limited(items, req) items = [_scrub_zone(item) for item in items] return dict(zones=items) - def detail(self, req): + def detail(self, req, **kw): """Return all zones in detail""" return self.index(req) - def show(self, req, id): + def show(self, req, id, **kw): """Return data about the given zone id""" zone_id = 
int(id) zone = db.zone_get(req.environ['nova.context'], zone_id) return dict(zone=_scrub_zone(zone)) - def delete(self, req, id): + def delete(self, req, id, **kw): zone_id = int(id) db.zone_delete(req.environ['nova.context'], zone_id) return {} - def create(self, req): + def create(self, req, **kw): context = req.environ['nova.context'] env = self._deserialize(req.body, req) zone = db.zone_create(context, env["zone"]) return dict(zone=_scrub_zone(zone)) - def update(self, req, id): + def update(self, req, id, **kw): context = req.environ['nova.context'] env = self._deserialize(req.body, req) zone_id = int(id) diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index cda2ecc28b68..1c917ad44cec 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -11,5 +11,5 @@ export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this se alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" export NOVA_API_KEY="%(access)s" -export NOVA_USERNAME="%(user)s" +export NOVA_USERNAME="%(project)s:%(user)s" export NOVA_URL="%(os)s" diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 6df2a88434e2..e311f310ae01 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1861,8 +1861,11 @@ def project_get_by_user(context, user_id): session = get_session() user = session.query(models.User).\ filter_by(deleted=can_read_deleted(context)).\ + filter_by(id=user_id).\ options(joinedload_all('projects')).\ first() + if not user: + raise exception.NotFound(_('Invalid user_id %s') % user_id) return user.projects diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 49ce8c1b5efe..03b26e29a042 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -26,7 +26,6 @@ from paste import urlmap from glance import client as glance_client -from nova import auth from nova import context from nova import exception as exc from nova import flags @@ -35,6 +34,7 @@ import nova.api.openstack.auth from nova.api import openstack from nova.api.openstack import auth from nova.api.openstack import ratelimiting +from nova.auth.manager import User, Project from nova.image import glance from nova.image import local from nova.image import service @@ -227,19 +227,97 @@ class FakeAuthDatabase(object): class FakeAuthManager(object): auth_data = {} + projects = {} + + @classmethod + def clear_fakes(cls): + cls.auth_data = {} + cls.projects = {} + + @classmethod + def reset_fake_data(cls): + cls.auth_data = dict(acc1=User('guy1', 'guy1', 'acc1', + 'fortytwo!', False)) + cls.projects = dict(testacct=Project('testacct', + 'testacct', + 'guy1', + 'test', + [])) def add_user(self, key, user): FakeAuthManager.auth_data[key] = user + def get_users(self): + return FakeAuthManager.auth_data.values() + def get_user(self, uid): for k, v in FakeAuthManager.auth_data.iteritems(): if v.id == uid: return v return None - def get_project(self, pid): + def delete_user(self, uid): + for k, v in FakeAuthManager.auth_data.items(): + if v.id == uid: + del FakeAuthManager.auth_data[k] return None + def create_user(self, name, access=None, secret=None, admin=False): + u = User(name, name, access, secret, admin) + FakeAuthManager.auth_data[access] = u + return u + + def modify_user(self, user_id, access=None, secret=None, 
admin=None): + user = None + for k, v in FakeAuthManager.auth_data.iteritems(): + if v.id == user_id: + user = v + if user: + user.access = access + user.secret = secret + if admin is not None: + user.admin = admin + + def is_admin(self, user): + return user.admin + + def is_project_member(self, user, project): + return ((user.id in project.member_ids) or + (user.id == project.project_manager_id)) + + def create_project(self, name, manager_user, description=None, + member_users=None): + member_ids = [User.safe_id(m) for m in member_users] \ + if member_users else [] + p = Project(name, name, User.safe_id(manager_user), + description, member_ids) + FakeAuthManager.projects[name] = p + return p + + def delete_project(self, pid): + if pid in FakeAuthManager.projects: + del FakeAuthManager.projects[pid] + + def modify_project(self, project, manager_user=None, description=None): + p = FakeAuthManager.projects.get(project) + p.project_manager_id = User.safe_id(manager_user) + p.description = description + + def get_project(self, pid): + p = FakeAuthManager.projects.get(pid) + if p: + return p + else: + raise exc.NotFound + + def get_projects(self, user=None): + if not user: + return FakeAuthManager.projects.values() + else: + return [p for p in FakeAuthManager.projects.values() + if (user.id in p.member_ids) or + (user.id == p.project_manager_id)] + def get_user_from_access_key(self, key): return FakeAuthManager.auth_data.get(key, None) diff --git a/nova/tests/api/openstack/test_accounts.py b/nova/tests/api/openstack/test_accounts.py new file mode 100644 index 000000000000..b2e89824add7 --- /dev/null +++ b/nova/tests/api/openstack/test_accounts.py @@ -0,0 +1,123 @@ +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import stubout +import webob +import json + +import nova.api +import nova.api.openstack.auth +from nova import context +from nova import flags +from nova import test +from nova.auth.manager import User +from nova.tests.api.openstack import fakes + + +FLAGS = flags.FLAGS +FLAGS.verbose = True + + +def fake_init(self): + self.manager = fakes.FakeAuthManager() + + +def fake_admin_check(self, req): + return True + + +class AccountsTest(test.TestCase): + def setUp(self): + super(AccountsTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.stubs.Set(nova.api.openstack.accounts.Controller, '__init__', + fake_init) + self.stubs.Set(nova.api.openstack.accounts.Controller, '_check_admin', + fake_admin_check) + fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthManager.projects = {} + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + + self.allow_admin = FLAGS.allow_admin_api + FLAGS.allow_admin_api = True + fakemgr = fakes.FakeAuthManager() + joeuser = User('guy1', 'guy1', 'acc1', 'fortytwo!', False) + superuser = User('guy2', 'guy2', 'acc2', 'swordfish', True) + fakemgr.add_user(joeuser.access, joeuser) + fakemgr.add_user(superuser.access, superuser) + fakemgr.create_project('test1', joeuser) + fakemgr.create_project('test2', superuser) + + def tearDown(self): + self.stubs.UnsetAll() + FLAGS.allow_admin_api = self.allow_admin + super(AccountsTest, self).tearDown() + + def test_get_account(self): + req = webob.Request.blank('/v1.0/test1') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res_dict['account']['id'], 'test1') + self.assertEqual(res_dict['account']['name'], 'test1') + self.assertEqual(res_dict['account']['manager'], 'guy1') + self.assertEqual(res.status_int, 200) + + def test_account_delete(self): + req = webob.Request.blank('/v1.0/test1') + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + self.assertTrue('test1' not in fakes.FakeAuthManager.projects) + self.assertEqual(res.status_int, 200) + + def test_account_create(self): + body = dict(account=dict(description='test account', + manager='guy1')) + req = webob.Request.blank('/v1.0/newacct') + req.method = 'PUT' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['account']['id'], 'newacct') + self.assertEqual(res_dict['account']['name'], 'newacct') + self.assertEqual(res_dict['account']['description'], 'test account') + self.assertEqual(res_dict['account']['manager'], 'guy1') + self.assertTrue('newacct' in + fakes.FakeAuthManager.projects) + self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 3) + + def test_account_update(self): + body = dict(account=dict(description='test account', + manager='guy2')) + req = webob.Request.blank('/v1.0/test1') + req.method = 'PUT' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['account']['id'], 'test1') + self.assertEqual(res_dict['account']['name'], 'test1') + self.assertEqual(res_dict['account']['description'], 'test account') + self.assertEqual(res_dict['account']['manager'], 'guy2') + self.assertEqual(len(fakes.FakeAuthManager.projects.values()), 2) diff --git a/nova/tests/api/openstack/test_adminapi.py 
b/nova/tests/api/openstack/test_adminapi.py index dfce1b1273bb..7cb9e8450932 100644 --- a/nova/tests/api/openstack/test_adminapi.py +++ b/nova/tests/api/openstack/test_adminapi.py @@ -35,7 +35,7 @@ class AdminAPITest(test.TestCase): def setUp(self): super(AdminAPITest, self).setUp() self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthManager.reset_fake_data() fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) @@ -50,7 +50,7 @@ class AdminAPITest(test.TestCase): def test_admin_enabled(self): FLAGS.allow_admin_api = True # We should still be able to access public operations. - req = webob.Request.blank('/v1.0/flavors') + req = webob.Request.blank('/v1.0/testacct/flavors') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) # TODO: Confirm admin operations are available. @@ -58,7 +58,7 @@ class AdminAPITest(test.TestCase): def test_admin_disabled(self): FLAGS.allow_admin_api = False # We should still be able to access public operations. - req = webob.Request.blank('/v1.0/flavors') + req = webob.Request.blank('/v1.0/testacct/flavors') res = req.get_response(fakes.wsgi_app()) # TODO: Confirm admin operations are unavailable. self.assertEqual(res.status_int, 200) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index ff8d42a1410c..8268a6fb9915 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -51,7 +51,9 @@ class Test(test.TestCase): def test_authorize_user(self): f = fakes.FakeAuthManager() - f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) + u = nova.auth.manager.User(1, 'herp', None, None, None) + f.add_user('derp', u) + f.create_project('test', u) req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'herp' @@ -65,7 +67,9 @@ class Test(test.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() - f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) + u = nova.auth.manager.User(1, 'herp', None, None, None) + f.add_user('derp', u) + f.create_project('test', u) req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'herp' @@ -74,7 +78,7 @@ class Test(test.TestCase): self.assertEqual(result.status, '204 No Content') self.assertEqual(len(result.headers['X-Auth-Token']), 40) self.assertEqual(result.headers['X-Server-Management-Url'], - "http://foo/v1.0/") + "http://foo/v1.0/test/") self.assertEqual(result.headers['X-CDN-Management-Url'], "") self.assertEqual(result.headers['X-Storage-Url'], "") @@ -82,7 +86,7 @@ class Test(test.TestCase): token = result.headers['X-Auth-Token'] self.stubs.Set(nova.api.openstack, 'APIRouter', fakes.FakeRouter) - req = webob.Request.blank('/v1.0/fake') + req = webob.Request.blank('/v1.0/test/fake') req.headers['X-Auth-Token'] = token result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '200 OK') @@ -176,6 +180,9 @@ class TestLimiter(test.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() + u = nova.auth.manager.User(1, 'herp', None, None, None) + f.add_user('derp', u) + f.create_project('test', u) f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) req = webob.Request.blank('/v1.0/') @@ -187,7 +194,7 @@ class TestLimiter(test.TestCase): token = result.headers['X-Auth-Token'] self.stubs.Set(nova.api.openstack, 'APIRouter', fakes.FakeRouter) - req = webob.Request.blank('/v1.0/fake') + req = 
webob.Request.blank('/v1.0/test/fake') req.method = 'POST' req.headers['X-Auth-Token'] = token result = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index 7612659655e9..370dc007c48d 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -28,7 +28,7 @@ class FlavorsTest(test.TestCase): def setUp(self): super(FlavorsTest, self).setUp() self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthManager.reset_fake_data() fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) @@ -39,7 +39,7 @@ class FlavorsTest(test.TestCase): super(FlavorsTest, self).tearDown() def test_get_flavor_list(self): - req = webob.Request.blank('/v1.0/flavors') + req = webob.Request.blank('/v1.0/testacct/flavors') res = req.get_response(fakes.wsgi_app()) def test_get_flavor_by_id(self): diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index e232bc3d500d..819ca001eaa0 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -202,7 +202,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): self.orig_image_service = FLAGS.image_service FLAGS.image_service = 'nova.image.glance.GlanceImageService' self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthManager.reset_fake_data() fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) @@ -216,7 +216,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): super(ImageControllerWithGlanceServiceTest, self).tearDown() def test_get_image_index(self): - req = webob.Request.blank('/v1.0/images') + req = webob.Request.blank('/v1.0/testacct/images') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -228,7 +228,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): "image %s not in fixture index!" 
% str(image)) def test_get_image_details(self): - req = webob.Request.blank('/v1.0/images/detail') + req = webob.Request.blank('/v1.0/testacct/images/detail') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index 78beb7df98c6..d592e06b0af3 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -118,7 +118,7 @@ class ServersTest(test.TestCase): def setUp(self): super(ServersTest, self).setUp() self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthManager.reset_fake_data() fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) @@ -150,7 +150,7 @@ class ServersTest(test.TestCase): super(ServersTest, self).tearDown() def test_get_server_by_id(self): - req = webob.Request.blank('/v1.0/servers/1') + req = webob.Request.blank('/v1.0/testacct/servers/1') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(res_dict['server']['id'], '1') @@ -161,7 +161,7 @@ class ServersTest(test.TestCase): public = ["1.2.3.4"] new_return_server = return_server_with_addresses(private, public) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) - req = webob.Request.blank('/v1.0/servers/1') + req = webob.Request.blank('/v1.0/testacct/servers/1') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(res_dict['server']['id'], '1') @@ -173,7 +173,7 @@ class ServersTest(test.TestCase): self.assertEqual(addresses["private"][0], private) def test_get_server_list(self): - req = webob.Request.blank('/v1.0/servers') + req = webob.Request.blank('/v1.0/testacct/servers') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -224,7 +224,7 @@ class ServersTest(test.TestCase): name='server_test', imageId=2, flavorId=2, metadata={'hello': 'world', 'open': 'stack'}, personality={})) - req = webob.Request.blank('/v1.0/servers') + req = webob.Request.blank('/v1.0/testacct/servers') req.method = 'POST' req.body = json.dumps(body) @@ -233,7 +233,7 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 200) def test_update_no_body(self): - req = webob.Request.blank('/v1.0/servers/1') + req = webob.Request.blank('/v1.0/testacct/servers/1') req.method = 'PUT' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 422) @@ -251,7 +251,7 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.db.api, 'instance_update', server_update) - req = webob.Request.blank('/v1.0/servers/1') + req = webob.Request.blank('/v1.0/testacct/servers/1') req.method = 'PUT' req.body = self.body req.get_response(fakes.wsgi_app()) @@ -267,30 +267,30 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.db.api, 'instance_update', server_update) - req = webob.Request.blank('/v1.0/servers/1') + req = webob.Request.blank('/v1.0/testacct/servers/1') req.method = 'PUT' req.body = self.body req.get_response(fakes.wsgi_app()) def test_create_backup_schedules(self): - req = webob.Request.blank('/v1.0/servers/1/backup_schedules') + req = webob.Request.blank('/v1.0/testacct/servers/1/backup_schedules') req.method = 'POST' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '404 Not Found') def test_delete_backup_schedules(self): - req = webob.Request.blank('/v1.0/servers/1/backup_schedules') + req = 
webob.Request.blank('/v1.0/testacct/servers/1/backup_schedules') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '404 Not Found') def test_get_server_backup_schedules(self): - req = webob.Request.blank('/v1.0/servers/1/backup_schedules') + req = webob.Request.blank('/v1.0/testacct/servers/1/backup_schedules') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '404 Not Found') def test_get_all_server_details(self): - req = webob.Request.blank('/v1.0/servers/detail') + req = webob.Request.blank('/v1.0/testacct/servers/detail') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -321,7 +321,7 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.db.api, 'instance_get_all_by_user', return_servers_with_host) - req = webob.Request.blank('/v1.0/servers/detail') + req = webob.Request.blank('/v1.0/testacct/servers/detail') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -341,7 +341,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/servers/1/pause') + req = webob.Request.blank('/v1.0/testacct/servers/1/pause') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -353,7 +353,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/servers/1/unpause') + req = webob.Request.blank('/v1.0/testacct/servers/1/unpause') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -365,7 +365,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/servers/1/suspend') + req = webob.Request.blank('/v1.0/testacct/servers/1/suspend') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -377,7 +377,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/servers/1/resume') + req = webob.Request.blank('/v1.0/testacct/servers/1/resume') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -389,7 +389,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/servers/1/reset_network') + req = webob.Request.blank('/v1.0/testacct/servers/1/reset_network') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -401,7 +401,8 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/servers/1/inject_network_info') + req = webob.Request.blank( + '/v1.0/testacct/servers/1/inject_network_info') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -409,13 +410,13 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) def test_server_diagnostics(self): - req = webob.Request.blank("/v1.0/servers/1/diagnostics") + req = webob.Request.blank("/v1.0/testacct/servers/1/diagnostics") req.method = "GET" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 404) def test_server_actions(self): - req 
= webob.Request.blank("/v1.0/servers/1/actions") + req = webob.Request.blank("/v1.0/testacct/servers/1/actions") req.method = "GET" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 404) @@ -424,7 +425,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/servers/1/action') + req = webob.Request.blank('/v1.0/testacct/servers/1/action') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -434,7 +435,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/servers/1/action') + req = webob.Request.blank('/v1.0/testacct/servers/1/action') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -444,14 +445,14 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/servers/1/action') + req = webob.Request.blank('/v1.0/testacct/servers/1/action') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) def test_delete_server_instance(self): - req = webob.Request.blank('/v1.0/servers/1') + req = webob.Request.blank('/v1.0/testacct/servers/1') req.method = 'DELETE' self.server_delete_called = False diff --git a/nova/tests/api/openstack/test_users.py b/nova/tests/api/openstack/test_users.py new file mode 100644 index 000000000000..bd32254cd50a --- /dev/null +++ b/nova/tests/api/openstack/test_users.py @@ -0,0 +1,139 @@ +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import stubout +import webob +import json + +import nova.api +import nova.api.openstack.auth +from nova import context +from nova import flags +from nova import test +from nova.auth.manager import User, Project +from nova.tests.api.openstack import fakes + + +FLAGS = flags.FLAGS +FLAGS.verbose = True + + +def fake_init(self): + self.manager = fakes.FakeAuthManager() + + +def fake_admin_check(self, req): + return True + + +class UsersTest(test.TestCase): + def setUp(self): + super(UsersTest, self).setUp() + self.stubs = stubout.StubOutForTesting() + self.stubs.Set(nova.api.openstack.users.Controller, '__init__', + fake_init) + self.stubs.Set(nova.api.openstack.users.Controller, '_check_admin', + fake_admin_check) + fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthManager.projects = dict(testacct=Project('testacct', + 'testacct', + 'guy1', + 'test', + [])) + fakes.FakeAuthDatabase.data = {} + fakes.stub_out_networking(self.stubs) + fakes.stub_out_rate_limiting(self.stubs) + fakes.stub_out_auth(self.stubs) + + self.allow_admin = FLAGS.allow_admin_api + FLAGS.allow_admin_api = True + fakemgr = fakes.FakeAuthManager() + fakemgr.add_user('acc1', User('guy1', 'guy1', 'acc1', + 'fortytwo!', False)) + fakemgr.add_user('acc2', User('guy2', 'guy2', 'acc2', + 'swordfish', True)) + + def tearDown(self): + self.stubs.UnsetAll() + FLAGS.allow_admin_api = self.allow_admin + super(UsersTest, self).tearDown() + + def test_get_user_list(self): + req = webob.Request.blank('/v1.0/testacct/users') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(len(res_dict['users']), 2) + + def test_get_user_by_id(self): + req = webob.Request.blank('/v1.0/testacct/users/guy2') + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res_dict['user']['id'], 'guy2') + self.assertEqual(res_dict['user']['name'], 'guy2') + self.assertEqual(res_dict['user']['secret'], 'swordfish') + self.assertEqual(res_dict['user']['admin'], True) + self.assertEqual(res.status_int, 200) + + def test_user_delete(self): + req = webob.Request.blank('/v1.0/testacct/users/guy1') + req.method = 'DELETE' + res = req.get_response(fakes.wsgi_app()) + self.assertTrue('guy1' not in [u.id for u in + fakes.FakeAuthManager.auth_data.values()]) + self.assertEqual(res.status_int, 200) + + def test_user_create(self): + body = dict(user=dict(name='test_guy', + access='acc3', + secret='invasionIsInNormandy', + admin=True)) + req = webob.Request.blank('/v1.0/testacct/users') + req.method = 'POST' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + self.assertEqual(res_dict['user']['id'], 'test_guy') + self.assertEqual(res_dict['user']['name'], 'test_guy') + self.assertEqual(res_dict['user']['access'], 'acc3') + self.assertEqual(res_dict['user']['secret'], 'invasionIsInNormandy') + self.assertEqual(res_dict['user']['admin'], True) + self.assertTrue('test_guy' in [u.id for u in + fakes.FakeAuthManager.auth_data.values()]) + self.assertEqual(len(fakes.FakeAuthManager.auth_data.values()), 3) + + def test_user_update(self): + body = dict(user=dict(name='guy2', + access='acc2', + secret='invasionIsInNormandy')) + req = webob.Request.blank('/v1.0/testacct/users/guy2') + req.method = 'PUT' + req.body = json.dumps(body) + + res = req.get_response(fakes.wsgi_app()) + res_dict = json.loads(res.body) + + self.assertEqual(res.status_int, 200) + 
self.assertEqual(res_dict['user']['id'], 'guy2') + self.assertEqual(res_dict['user']['name'], 'guy2') + self.assertEqual(res_dict['user']['access'], 'acc2') + self.assertEqual(res_dict['user']['secret'], 'invasionIsInNormandy') + self.assertEqual(res_dict['user']['admin'], True) diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 555b206b9dc4..51f13af48c80 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -64,7 +64,7 @@ class ZonesTest(test.TestCase): def setUp(self): super(ZonesTest, self).setUp() self.stubs = stubout.StubOutForTesting() - fakes.FakeAuthManager.auth_data = {} + fakes.FakeAuthManager.reset_fake_data() fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) @@ -85,7 +85,7 @@ class ZonesTest(test.TestCase): super(ZonesTest, self).tearDown() def test_get_zone_list(self): - req = webob.Request.blank('/v1.0/zones') + req = webob.Request.blank('/v1.0/testacct/zones') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -93,7 +93,7 @@ class ZonesTest(test.TestCase): self.assertEqual(len(res_dict['zones']), 2) def test_get_zone_by_id(self): - req = webob.Request.blank('/v1.0/zones/1') + req = webob.Request.blank('/v1.0/testacct/zones/1') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -103,7 +103,7 @@ class ZonesTest(test.TestCase): self.assertEqual(res.status_int, 200) def test_zone_delete(self): - req = webob.Request.blank('/v1.0/zones/1') + req = webob.Request.blank('/v1.0/testacct/zones/1') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) @@ -111,7 +111,7 @@ class ZonesTest(test.TestCase): def test_zone_create(self): body = dict(zone=dict(api_url='http://blah.zoo', username='fred', password='fubar')) - req = webob.Request.blank('/v1.0/zones') + req = webob.Request.blank('/v1.0/testacct/zones') req.method = 'POST' req.body = json.dumps(body) @@ -125,7 +125,7 @@ class ZonesTest(test.TestCase): def test_zone_update(self): body = dict(zone=dict(username='zeb', password='sneaky')) - req = webob.Request.blank('/v1.0/zones/1') + req = webob.Request.blank('/v1.0/testacct/zones/1') req.method = 'PUT' req.body = json.dumps(body) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 7531af4ec72f..a45d32cb29a4 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -207,7 +207,7 @@ def _upload_tarball(staging_path, image_id, glance_host, glance_port): 'transfer-encoding': 'chunked', 'x-image-meta-is_public': 'True', 'x-image-meta-status': 'queued', - 'x-image-meta-type': 'vhd' + 'x-image-meta-type': 'vhd', } for header, value in headers.iteritems(): conn.putheader(header, value) From 137a4946785b9460aadb9fe40f2b0e18bd7f6063 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Fri, 4 Mar 2011 01:09:21 +0900 Subject: [PATCH 15/76] Merged to trunk rev 757. Main changes are below. 1. Rename db table ComputeService -> ComputeNode 2. nova-manage option instance_type is reserved and we cannot use option instance, so change instance -> vm. 
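
For reviewers, a rough sketch of the renamed surface (names as introduced
by this patch; the field values are illustrative, borrowed from the test
fixtures rather than a real deployment). The db facade now exposes
compute_node_* in place of compute_service_*, and the nova-manage category
moves from "instance" to "vm", so live migration becomes
"nova-manage vm live_migration <ec2_id> <dest>":

    from nova import context
    from nova import db

    ctxt = context.get_admin_context()
    # One compute_nodes row per hypervisor, tied to its service record.
    node = db.compute_node_create(ctxt, {'service_id': 1,
                                         'vcpus': 16,
                                         'memory_mb': 32,
                                         'local_gb': 100,
                                         'vcpus_used': 16,
                                         'memory_mb_used': 32,
                                         'local_gb_used': 10,
                                         'hypervisor_type': 'qemu',
                                         'hypervisor_version': 12003,
                                         'cpu_info': ''})
    # Subsequent resource refreshes update the same row in place.
    db.compute_node_update(ctxt, node['id'], {'vcpus_used': 2})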
--- bin/nova-manage | 4 +-- nova/db/api.py | 12 ++++----- nova/db/sqlalchemy/api.py | 26 +++++++++---------- .../versions/009_add_live_migration.py | 8 +++--- nova/db/sqlalchemy/models.py | 10 +++---- nova/scheduler/driver.py | 10 +++---- nova/scheduler/manager.py | 14 +++++----- nova/tests/test_scheduler.py | 16 ++++++------ nova/tests/test_virt.py | 20 +++++++------- nova/virt/libvirt_conn.py | 10 +++---- 10 files changed, 65 insertions(+), 65 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index f41950cd2a58..d782f60287cd 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -546,7 +546,7 @@ class NetworkCommands(object): network.dns) -class InstanceCommands(object): +class VmCommands(object): """Class for mangaging VM instances.""" def live_migration(self, ec2_id, dest): @@ -831,7 +831,7 @@ CATEGORIES = [ ('fixed', FixedIpCommands), ('floating', FloatingIpCommands), ('network', NetworkCommands), - ('instance', InstanceCommands), + ('vm', VmCommands), ('service', ServiceCommands), ('log', LogCommands), ('db', DbCommands), diff --git a/nova/db/api.py b/nova/db/api.py index 13bc07ad2068..3b427cefed97 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -153,24 +153,24 @@ def service_update(context, service_id, values): ################### -def compute_service_get(context, compute_id, session=None): +def compute_node_get(context, compute_id, session=None): """Get an computeService or raise if it does not exist.""" - return IMPL.compute_service_get(context, compute_id) + return IMPL.compute_node_get(context, compute_id) -def compute_service_create(context, values): +def compute_node_create(context, values): """Create a computeService from the values dictionary.""" - return IMPL.compute_service_create(context, values) + return IMPL.compute_node_create(context, values) -def compute_service_update(context, compute_id, values): +def compute_node_update(context, compute_id, values): """Set the given properties on an computeService and update it. Raises NotFound if computeService does not exist. 
""" - return IMPL.compute_service_update(context, compute_id, values) + return IMPL.compute_node_update(context, compute_id, values) ################### diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index bed621b18781..69aa07279316 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -119,8 +119,8 @@ def service_destroy(context, service_id): service_ref.delete(session=session) if service_ref.topic == 'compute' and \ - len(service_ref.compute_service) != 0: - for c in service_ref.compute_service: + len(service_ref.compute_node) != 0: + for c in service_ref.compute_node: c.delete(session=session) @@ -130,7 +130,7 @@ def service_get(context, service_id, session=None): session = get_session() result = session.query(models.Service).\ - options(joinedload('compute_service')).\ + options(joinedload('compute_node')).\ filter_by(id=service_id).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -174,7 +174,7 @@ def service_get_all_compute_by_host(context, host): topic = 'compute' session = get_session() result = session.query(models.Service).\ - options(joinedload('compute_service')).\ + options(joinedload('compute_node')).\ filter_by(deleted=False).\ filter_by(host=host).\ filter_by(topic=topic).\ @@ -298,11 +298,11 @@ def service_update(context, service_id, values): @require_admin_context -def compute_service_get(context, compute_id, session=None): +def compute_node_get(context, compute_id, session=None): if not session: session = get_session() - result = session.query(models.ComputeService).\ + result = session.query(models.ComputeNode).\ filter_by(id=compute_id).\ filter_by(deleted=can_read_deleted(context)).\ first() @@ -314,18 +314,18 @@ def compute_service_get(context, compute_id, session=None): @require_admin_context -def compute_service_create(context, values): - compute_service_ref = models.ComputeService() - compute_service_ref.update(values) - compute_service_ref.save() - return compute_service_ref +def compute_node_create(context, values): + compute_node_ref = models.ComputeNode() + compute_node_ref.update(values) + compute_node_ref.save() + return compute_node_ref @require_admin_context -def compute_service_update(context, compute_id, values): +def compute_node_update(context, compute_id, values): session = get_session() with session.begin(): - compute_ref = compute_service_get(context, compute_id, session=session) + compute_ref = compute_node_get(context, compute_id, session=session) compute_ref.update(values) compute_ref.save(session=session) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_live_migration.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_live_migration.py index 2689b5b7485d..23ccccb4e3e2 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/009_add_live_migration.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_live_migration.py @@ -31,7 +31,7 @@ instances = Table('instances', meta, # New Tables # -compute_services = Table('compute_services', meta, +compute_nodes = Table('compute_nodes', meta, Column('created_at', DateTime(timezone=False)), Column('updated_at', DateTime(timezone=False)), Column('deleted_at', DateTime(timezone=False)), @@ -73,11 +73,11 @@ def upgrade(migrate_engine): meta.bind = migrate_engine try: - compute_services.create() + compute_nodes.create() except Exception: - logging.info(repr(compute_services)) + logging.info(repr(compute_nodes)) logging.exception('Exception while creating table') - meta.drop_all(tables=[compute_services]) + meta.drop_all(tables=[compute_nodes]) 
raise instances.create_column(instances_launched_on) diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 2af7377efad4..8646190f35d1 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -113,18 +113,18 @@ class Service(BASE, NovaBase): availability_zone = Column(String(255), default='nova') -class ComputeService(BASE, NovaBase): +class ComputeNode(BASE, NovaBase): """Represents a running compute service on a host.""" - __tablename__ = 'compute_services' + __tablename__ = 'compute_nodes' id = Column(Integer, primary_key=True) service_id = Column(Integer, ForeignKey('services.id'), nullable=True) service = relationship(Service, - backref=backref('compute_service'), + backref=backref('compute_node'), foreign_keys=service_id, primaryjoin='and_(' - 'ComputeService.service_id == Service.id,' - 'ComputeService.deleted == False)') + 'ComputeNode.service_id == Service.id,' + 'ComputeNode.deleted == False)') vcpus = Column(Integer, nullable=True) memory_mb = Column(Integer, nullable=True) diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 4485ba39fff2..791f9000d37a 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -190,7 +190,7 @@ class Scheduler(object): # Checking dest exists. dservice_refs = db.service_get_all_compute_by_host(context, dest) - dservice_ref = dservice_refs[0]['compute_service'][0] + dservice_ref = dservice_refs[0]['compute_node'][0] # Checking original host( where instance was launched at) exists. try: @@ -200,7 +200,7 @@ class Scheduler(object): raise exception.Invalid(_("host %s where instance was launched " "does not exist.") % instance_ref['launched_on']) - oservice_ref = oservice_refs[0]['compute_service'][0] + oservice_ref = oservice_refs[0]['compute_node'][0] # Checking hypervisor is same. 
orig_hypervisor = oservice_ref['hypervisor_type'] @@ -252,10 +252,10 @@ class Scheduler(object): # Getting host information service_refs = db.service_get_all_compute_by_host(context, dest) - compute_service_ref = service_refs[0]['compute_service'][0] + compute_node_ref = service_refs[0]['compute_node'][0] - mem_total = int(compute_service_ref['memory_mb']) - mem_used = int(compute_service_ref['memory_mb_used']) + mem_total = int(compute_node_ref['memory_mb']) + mem_used = int(compute_node_ref['memory_mb_used']) mem_avail = mem_total - mem_used mem_inst = instance_ref['memory_mb'] if mem_avail <= mem_inst: diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index a50d3ab201ac..090d8b89dcb0 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -87,13 +87,13 @@ class SchedulerManager(manager.Manager): compute_ref = compute_ref[0] # Getting physical resource information - compute_service_ref = compute_ref['compute_service'][0] - resource = {'vcpus': compute_service_ref['vcpus'], - 'memory_mb': compute_service_ref['memory_mb'], - 'local_gb': compute_service_ref['local_gb'], - 'vcpus_used': compute_service_ref['vcpus_used'], - 'memory_mb_used': compute_service_ref['memory_mb_used'], - 'local_gb_used': compute_service_ref['local_gb_used']} + compute_node_ref = compute_ref['compute_node'][0] + resource = {'vcpus': compute_node_ref['vcpus'], + 'memory_mb': compute_node_ref['memory_mb'], + 'local_gb': compute_node_ref['local_gb'], + 'vcpus_used': compute_node_ref['vcpus_used'], + 'memory_mb_used': compute_node_ref['memory_mb_used'], + 'local_gb_used': compute_node_ref['local_gb_used']} # Getting usage resource information usage = {} diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 62db42b115d6..711b66af70d3 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -60,7 +60,7 @@ class SchedulerTestCase(test.TestCase): self.flags(scheduler_driver='nova.tests.test_scheduler.TestDriver') def _create_compute_service(self): - """Create compute-manager(ComputeService and Service record).""" + """Create compute-manager(ComputeNode and Service record).""" ctxt = context.get_admin_context() dic = {'host': 'dummy', 'binary': 'nova-compute', 'topic': 'compute', 'report_count': 0, 'availability_zone': 'dummyzone'} @@ -71,7 +71,7 @@ class SchedulerTestCase(test.TestCase): 'vcpus_used': 16, 'memory_mb_used': 32, 'local_gb_used': 10, 'hypervisor_type': 'qemu', 'hypervisor_version': 12003, 'cpu_info': ''} - db.compute_service_create(ctxt, dic) + db.compute_node_create(ctxt, dic) return db.service_get(ctxt, s_ref['id']) @@ -144,8 +144,8 @@ class SchedulerTestCase(test.TestCase): # result checking c1 = ('resource' in result and 'usage' in result) - compute_service = s_ref['compute_service'][0] - c2 = self._dic_is_equal(result['resource'], compute_service) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) c3 = result['usage'] == {} self.assertTrue(c1 and c2 and c3) db.service_destroy(ctxt, s_ref['id']) @@ -163,8 +163,8 @@ class SchedulerTestCase(test.TestCase): result = scheduler.show_host_resources(ctxt, s_ref['host']) c1 = ('resource' in result and 'usage' in result) - compute_service = s_ref['compute_service'][0] - c2 = self._dic_is_equal(result['resource'], compute_service) + compute_node = s_ref['compute_node'][0] + c2 = self._dic_is_equal(result['resource'], compute_node) c3 = result['usage'].keys() == ['p-01', 'p-02'] keys = ['vcpus', 'memory_mb', 'local_gb'] c4 = 
self._dic_is_equal(result['usage']['p-01'], i_ref1, keys) @@ -301,7 +301,7 @@ class SimpleDriverTestCase(test.TestCase): dic['memory_mb_used'] = kwargs.get('memory_mb_used', 32) dic['hypervisor_type'] = kwargs.get('hypervisor_type', 'qemu') dic['hypervisor_version'] = kwargs.get('hypervisor_version', 12003) - db.compute_service_create(self.context, dic) + db.compute_node_create(self.context, dic) return db.service_get(self.context, s_ref['id']) def test_doesnt_report_disabled_hosts_as_up(self): @@ -923,7 +923,7 @@ class SimpleDriverTestCase(test.TestCase): self.mox.StubOutWithMock(rpc, 'call', use_mock_anything=True) rpc.call(mox.IgnoreArg(), mox.IgnoreArg(), {"method": 'compare_cpu', - "args": {'cpu_info': s_ref2['compute_service'][0]['cpu_info']}}).\ + "args": {'cpu_info': s_ref2['compute_node'][0]['cpu_info']}}).\ AndRaise(rpc.RemoteError("doesn't have compatibility to", "", "")) self.mox.ReplayAll() diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 17b80c294887..aac55a894175 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -275,7 +275,7 @@ class LibvirtConnTestCase(test.TestCase): db.instance_destroy(user_context, instance_ref['id']) def test_update_available_resource_works_correctly(self): - """Confirm compute_service table is updated successfully.""" + """Confirm compute_node table is updated successfully.""" org_path = FLAGS.instances_path = '' FLAGS.instances_path = '.' @@ -289,16 +289,16 @@ class LibvirtConnTestCase(test.TestCase): conn = libvirt_conn.LibvirtConnection(False) conn.update_available_resource(self.context, 'dummy') service_ref = db.service_get(self.context, service_ref['id']) - compute_service = service_ref['compute_service'][0] + compute_node = service_ref['compute_node'][0] - c1 = (compute_service['vcpus'] > 0) - c2 = (compute_service['memory_mb'] > 0) - c3 = (compute_service['local_gb'] > 0) - c4 = (compute_service['vcpus_used'] == 0) - c5 = (compute_service['memory_mb_used'] > 0) - c6 = (compute_service['local_gb_used'] > 0) - c7 = (len(compute_service['hypervisor_type']) > 0) - c8 = (compute_service['hypervisor_version'] > 0) + c1 = (compute_node['vcpus'] > 0) + c2 = (compute_node['memory_mb'] > 0) + c3 = (compute_node['local_gb'] > 0) + c4 = (compute_node['vcpus_used'] == 0) + c5 = (compute_node['memory_mb_used'] > 0) + c6 = (compute_node['local_gb_used'] > 0) + c7 = (len(compute_node['hypervisor_type']) > 0) + c8 = (compute_node['hypervisor_version'] > 0) self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7 and c8) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index b9abf1890021..71ca508b0637 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1021,7 +1021,7 @@ class LibvirtConnection(object): self.firewall_driver.refresh_security_group_members(security_group_id) def update_available_resource(self, ctxt, host): - """Updates compute manager resource info on ComputeService table. + """Updates compute manager resource info on ComputeNode table. This method is called when nova-coompute launches, and whenever admin executes "nova-manage service update_resource". 
@@ -1049,14 +1049,14 @@ class LibvirtConnection(object):
                'hypervisor_version': self.get_hypervisor_version(),
                'cpu_info': self.get_cpu_info()}
 
-        compute_service_ref = service_ref['compute_service']
-        if not compute_service_ref:
+        compute_node_ref = service_ref['compute_node']
+        if not compute_node_ref:
             LOG.info(_('Compute_service record is created for %s ') % host)
             dic['service_id'] = service_ref['id']
-            db.compute_service_create(ctxt, dic)
+            db.compute_node_create(ctxt, dic)
         else:
             LOG.info(_('Compute_service record is updated for %s ') % host)
-            db.compute_service_update(ctxt, compute_service_ref[0]['id'], dic)
+            db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic)
 
     def compare_cpu(self, cpu_info):
         """Checks the host cpu is compatible to a cpu given by xml.

From bc6cc457132b096150dcd9ff2ed2909585a80484 Mon Sep 17 00:00:00 2001
From: Kei Masumoto
Date: Fri, 4 Mar 2011 01:17:05 +0900
Subject: [PATCH 16/76] some comments are modified

---
 nova/db/api.py            | 8 ++++----
 nova/db/sqlalchemy/api.py | 2 +-
 2 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/nova/db/api.py b/nova/db/api.py
index 3b427cefed97..43e1c2183ebf 100644
--- a/nova/db/api.py
+++ b/nova/db/api.py
@@ -154,19 +154,19 @@ def service_update(context, service_id, values):
 
 
 def compute_node_get(context, compute_id, session=None):
-    """Get an computeService or raise if it does not exist."""
+    """Get a computeNode or raise if it does not exist."""
     return IMPL.compute_node_get(context, compute_id)
 
 
 def compute_node_create(context, values):
-    """Create a computeService from the values dictionary."""
+    """Create a computeNode from the values dictionary."""
     return IMPL.compute_node_create(context, values)
 
 
 def compute_node_update(context, compute_id, values):
-    """Set the given properties on an computeService and update it.
+    """Set the given properties on a computeNode and update it.
 
-    Raises NotFound if computeService does not exist.
+    Raises NotFound if computeNode does not exist.
 
     """
 
diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py
index 69aa07279316..b305543ffdf1 100644
--- a/nova/db/sqlalchemy/api.py
+++ b/nova/db/sqlalchemy/api.py
@@ -308,7 +308,7 @@ def compute_node_get(context, compute_id, session=None):
         first()
 
     if not result:
-        raise exception.NotFound(_('No computeService for id %s') % compute_id)
+        raise exception.NotFound(_('No computeNode for id %s') % compute_id)
 
     return result

From 417f6ca5c54878a6bea4d545126f93ecb6a043b4 Mon Sep 17 00:00:00 2001
From: Monsyne Dragon
Date: Thu, 3 Mar 2011 22:22:00 +0000
Subject: [PATCH 17/76] localize a few error messages.
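
The pattern applied, sketched for reference (a simplified stand-in, not the
exact controller code: nova installs the gettext _() hook into builtins at
service startup, so the explicit install call below only makes the snippet
self-contained):

    import gettext
    gettext.install('nova', unicode=1)   # makes _() available as a builtin

    from nova import exception

    def check_admin(context):
        if not context.is_admin:
            # Wrap the user-facing string in _() so translators can pick
            # it up from the message catalog instead of hard-coded English.
            raise exception.NotAuthorized(_("Not admin user"))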
--- nova/api/openstack/accounts.py | 2 +- nova/api/openstack/users.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index 264fdab99c45..3b90d27765bb 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -47,7 +47,7 @@ class Controller(wsgi.Controller): """ We cannot depend on the db layer to check for admin access for the auth manager, so we do it here """ if not context.is_admin: - raise exception.NotAuthorized("Not admin user.") + raise exception.NotAuthorized(_("Not admin user.")) def show(self, req, id): """Return data about the given account id""" diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index c0b7544f953c..ae3bf77918d2 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -48,7 +48,7 @@ class Controller(wsgi.Controller): """ We cannot depend on the db layer to check for admin access for the auth manager, so we do it here """ if not context.is_admin: - raise exception.NotAuthorized("Not admin user") + raise exception.NotAuthorized(_("Not admin user")) def index(self, req, **kw): """Return all users in brief""" From c5bfab9a0d213cee549371f05e74747cfcd8f998 Mon Sep 17 00:00:00 2001 From: Ryan Lane Date: Thu, 3 Mar 2011 23:05:00 +0000 Subject: [PATCH 18/76] Changing output of status from showing the user as the owner, to showing the project --- nova/api/ec2/cloud.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index b1917e9eadc5..cadda97dbbd7 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -562,7 +562,7 @@ class CloudController(object): if context.is_admin: v['status'] = '%s (%s, %s, %s, %s)' % ( volume['status'], - volume['user_id'], + volume['project_id'], volume['host'], instance_data, volume['mountpoint']) From 1f0df07baac52379b122a9928200305dd9d2151f Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Sat, 5 Mar 2011 00:57:08 +0900 Subject: [PATCH 19/76] Fixed based on reviewer's comment. Main changes are below. 1. get_vcpu_total()/get_memory_mb()/get_memory_mb_used() is changed for users who used non-linux environment. 2. test code added to test_virt. --- contrib/nova.sh | 1 + nova/tests/test_virt.py | 163 +++++++++++++++++++++++++++++++------- nova/virt/libvirt_conn.py | 12 ++- 3 files changed, 147 insertions(+), 29 deletions(-) diff --git a/contrib/nova.sh b/contrib/nova.sh index 1187f27287b5..cf5b3de11f9e 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -76,6 +76,7 @@ if [ "$CMD" == "install" ]; then sudo apt-get install -y python-migrate python-eventlet python-gflags python-ipy python-tempita sudo apt-get install -y python-libvirt python-libxml2 python-routes python-cheetah sudo apt-get install -y python-netaddr python-paste python-pastedeploy python-glance + sudo apt-get install -y python-multiprocessing if [ "$USE_IPV6" == 1 ]; then sudo apt-get install -y radvd diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index aac55a894175..5bb31659b5ad 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -15,6 +15,7 @@ # under the License. 
import mox +import sys from xml.etree.ElementTree import fromstring as xml_to_tree from xml.dom.minidom import parseString as xml_to_dom @@ -27,11 +28,15 @@ from nova import test from nova import utils from nova.api.ec2 import cloud from nova.auth import manager +from nova.compute import manager as compute_manager +from nova.compute import power_state from nova.db.sqlalchemy import models from nova.virt import libvirt_conn +libvirt = None FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') +flags.DECLARE('compute_driver', 'nova.compute.manager') class LibvirtConnTestCase(test.TestCase): @@ -73,31 +78,36 @@ class LibvirtConnTestCase(test.TestCase): 'bridge': 'br101', 'instance_type': 'm1.small'} + def lazy_load_library_exists(self): + """check if libvirt is available.""" + # try to connect libvirt. if fail, skip test. + try: + import libvirt + import libxml2 + except ImportError: + return False + global libvirt + libvirt = __import__('libvirt') + libvirt_conn.libvirt = __import__('libvirt') + libvirt_conn.libxml2 = __import__('libxml2') + return True + def create_fake_libvirt_mock(self, **kwargs): """Defining mocks for LibvirtConnection(libvirt is not used).""" - # A fake libvirt.virtConnect + # A fake libvirt.virConnect class FakeLibvirtConnection(object): - def getVersion(self): - return 12003 - - def getType(self): - return 'qemu' - - def getCapabilities(self): - return 'qemu' - - def listDomainsID(self): - return [] - - def getCapabilitied(self): - return + pass # A fake libvirt_conn.IptablesFirewallDriver class FakeIptablesFirewallDriver(object): + def __init__(self, **kwargs): pass + def setattr(self, key, val): + self.__setattr__(key, val) + # Creating mocks fake = FakeLibvirtConnection() fakeip = FakeIptablesFirewallDriver @@ -274,33 +284,54 @@ class LibvirtConnTestCase(test.TestCase): self.assertEquals(uri, testuri) db.instance_destroy(user_context, instance_ref['id']) - def test_update_available_resource_works_correctly(self): + def tes1t_update_available_resource_works_correctly(self): """Confirm compute_node table is updated successfully.""" org_path = FLAGS.instances_path = '' FLAGS.instances_path = '.' 
+        # Prepare mocks
+        def getVersion():
+            return 12003
+
+        def getType():
+            return 'qemu'
+
+        def listDomainsID():
+            return []
+
         service_ref = self.create_service(host='dummy')
-        self.create_fake_libvirt_mock()
+        self.create_fake_libvirt_mock(getVersion=getVersion,
+                                      getType=getType,
+                                      listDomainsID=listDomainsID)
 
         self.mox.StubOutWithMock(libvirt_conn.LibvirtConnection,
                                  'get_cpu_info')
         libvirt_conn.LibvirtConnection.get_cpu_info().AndReturn('cpuinfo')
 
+        # Start test
         self.mox.ReplayAll()
         conn = libvirt_conn.LibvirtConnection(False)
         conn.update_available_resource(self.context, 'dummy')
         service_ref = db.service_get(self.context, service_ref['id'])
         compute_node = service_ref['compute_node'][0]
 
-        c1 = (compute_node['vcpus'] > 0)
-        c2 = (compute_node['memory_mb'] > 0)
-        c3 = (compute_node['local_gb'] > 0)
-        c4 = (compute_node['vcpus_used'] == 0)
-        c5 = (compute_node['memory_mb_used'] > 0)
-        c6 = (compute_node['local_gb_used'] > 0)
-        c7 = (len(compute_node['hypervisor_type']) > 0)
-        c8 = (compute_node['hypervisor_version'] > 0)
-
-        self.assertTrue(c1 and c2 and c3 and c4 and c5 and c6 and c7 and c8)
+        if sys.platform.upper() == 'LINUX2':
+            self.assertTrue(compute_node['vcpus'] > 0)
+            self.assertTrue(compute_node['memory_mb'] > 0)
+            self.assertTrue(compute_node['local_gb'] > 0)
+            self.assertTrue(compute_node['vcpus_used'] == 0)
+            self.assertTrue(compute_node['memory_mb_used'] > 0)
+            self.assertTrue(compute_node['local_gb_used'] > 0)
+            self.assertTrue(len(compute_node['hypervisor_type']) > 0)
+            self.assertTrue(compute_node['hypervisor_version'] > 0)
+        else:
+            self.assertTrue(compute_node['vcpus'] > 0)
+            self.assertTrue(compute_node['memory_mb'] == 0)
+            self.assertTrue(compute_node['local_gb'] > 0)
+            self.assertTrue(compute_node['vcpus_used'] == 0)
+            self.assertTrue(compute_node['memory_mb_used'] == 0)
+            self.assertTrue(compute_node['local_gb_used'] > 0)
+            self.assertTrue(len(compute_node['hypervisor_type']) > 0)
+            self.assertTrue(compute_node['hypervisor_version'] > 0)
 
         db.service_destroy(self.context, service_ref['id'])
         FLAGS.instances_path = org_path
@@ -319,6 +350,84 @@
 
         FLAGS.instances_path = org_path
 
+    def test_ensure_filtering_rules_for_instance_timeout(self):
+        """ensure_filtering_rules_for_instance() finishes with timeout."""
+        # Skip if non-libvirt environment
+        if not self.lazy_load_library_exists():
+            return
+
+        # Preparing mocks
+        def fake_none(self):
+            return
+
+        def fake_raise(self):
+            raise libvirt.libvirtError('ERR')
+
+        self.create_fake_libvirt_mock(nwfilterLookupByName=fake_raise)
+        instance_ref = db.instance_create(self.context, self.test_instance)
+
+        # Start test
+        self.mox.ReplayAll()
+        try:
+            conn = libvirt_conn.LibvirtConnection(False)
+            conn.firewall_driver.setattr('setup_basic_filtering', fake_none)
+            conn.firewall_driver.setattr('prepare_instance_filter', fake_none)
+            conn.ensure_filtering_rules_for_instance(instance_ref)
+        except exception.Error, e:
+            c1 = (0 <= e.message.find('Timeout migrating for'))
+            self.assertTrue(c1)
+
+        db.instance_destroy(self.context, instance_ref['id'])
+
+    def test_live_migration_raises_exception(self):
+        """Confirms recover method is called when exceptions are raised."""
+        # Skip if non-libvirt environment
+        if not self.lazy_load_library_exists():
+            return
+
+        # Preparing data
+        self.compute = utils.import_object(FLAGS.compute_manager)
+        instance_dict = {'host': 'fake', 'state': power_state.RUNNING,
+                         'state_description': 'running'}
+        instance_ref = db.instance_create(self.context, self.test_instance)
+        instance_ref = db.instance_update(self.context, instance_ref['id'],
+                                          instance_dict)
+        vol_dict = {'status': 'migrating', 'size': 1}
+        volume_ref = db.volume_create(self.context, vol_dict)
+        db.volume_attached(self.context, volume_ref['id'], instance_ref['id'],
+                           '/dev/fake')
+
+        # Preparing mocks
+        vdmock = self.mox.CreateMock(libvirt.virDomain)
+        self.mox.StubOutWithMock(vdmock, "migrateToURI")
+        vdmock.migrateToURI(FLAGS.live_migration_uri % 'dest',
+                            mox.IgnoreArg(),
+                            None, FLAGS.live_migration_bandwidth).\
+                            AndRaise(libvirt.libvirtError('ERR'))
+
+        def fake_lookup(instance_name):
+            if instance_name == instance_ref.name:
+                return vdmock
+
+        self.create_fake_libvirt_mock(lookupByName=fake_lookup)
+
+        # Start test
+        self.mox.ReplayAll()
+        conn = libvirt_conn.LibvirtConnection(False)
+        self.assertRaises(libvirt.libvirtError,
+                          conn._live_migration,
+                          self.context, instance_ref, 'dest', '',
+                          self.compute.recover_live_migration)
+
+        instance_ref = db.instance_get(self.context, instance_ref['id'])
+        self.assertTrue(instance_ref['state_description'] == 'running')
+        self.assertTrue(instance_ref['state'] == power_state.RUNNING)
+        volume_ref = db.volume_get(self.context, volume_ref['id'])
+        self.assertTrue(volume_ref['status'] == 'in-use')
+
+        db.volume_destroy(self.context, volume_ref['id'])
+        db.instance_destroy(self.context, instance_ref['id'])
+
     def tearDown(self):
         self.manager.delete_project(self.project)
         self.manager.delete_user(self.user)
diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py
index 71ca508b0637..627a12a1c568 100644
--- a/nova/virt/libvirt_conn.py
+++ b/nova/virt/libvirt_conn.py
@@ -36,8 +36,10 @@ Supports KVM, QEMU, UML, and XEN.
 
 """
 
+import multiprocessing
 import os
 import shutil
+import sys
 import random
 import subprocess
 import time
@@ -858,7 +860,7 @@ class LibvirtConnection(object):
 
         """
 
-        return open('/proc/cpuinfo').read().count('processor')
+        return multiprocessing.cpu_count()
 
     def get_memory_mb_total(self):
         """Get the total memory size(MB) of physical computer.
@@ -867,6 +869,9 @@
 
         """
 
+        if sys.platform.upper() != 'LINUX2':
+            return 0
+
         meminfo = open('/proc/meminfo').read().split()
         idx = meminfo.index('MemTotal:')
         # transforming kb to mb.
@@ -905,6 +910,9 @@ class LibvirtConnection(object): """ + if sys.platform.upper() != 'LINUX2': + return 0 + m = open('/proc/meminfo').read().split() idx1 = m.index('MemFree:') idx2 = m.index('Buffers:') @@ -1126,7 +1134,7 @@ class LibvirtConnection(object): # wait for completion timeout_count = range(FLAGS.live_migration_retry_count) - while not timeout_count: + while timeout_count: try: filter_name = 'nova-instance-%s' % instance_ref.name self._conn.nwfilterLookupByName(filter_name) From 23291a5e1a0134aff5fe030b52d4335a6f2a18d9 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Sat, 5 Mar 2011 01:07:12 +0900 Subject: [PATCH 20/76] delete unnecessary DECLARE --- nova/tests/test_virt.py | 1 - 1 file changed, 1 deletion(-) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 5bb31659b5ad..7ea8c0fb57d9 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -36,7 +36,6 @@ from nova.virt import libvirt_conn libvirt = None FLAGS = flags.FLAGS flags.DECLARE('instances_path', 'nova.compute.manager') -flags.DECLARE('compute_driver', 'nova.compute.manager') class LibvirtConnTestCase(test.TestCase): From 68d894be2ec3b4eaa14dc5c90143f45f7db1e4b8 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Fri, 4 Mar 2011 17:48:28 +0000 Subject: [PATCH 21/76] * Tests to verify correct vm-params for Windows and Linux instances --- nova/compute/api.py | 5 +- .../versions/007_add_os_type_to_instances.py | 4 +- nova/tests/db/fakes.py | 1 + nova/tests/test_xenapi.py | 99 ++++++++++++++++--- nova/virt/xenapi/vm_utils.py | 14 +-- nova/virt/xenapi/vmops.py | 8 +- 6 files changed, 101 insertions(+), 30 deletions(-) diff --git a/nova/compute/api.py b/nova/compute/api.py index 8bdf712a0b25..d79371e945db 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -125,7 +125,10 @@ class API(base.Base): raise quota.QuotaError(msg, "MetadataLimitExceeded") image = self.image_service.show(context, image_id) - os_type = image['properties'].get('os_type', 'linux') + + os_type = None + if 'properties' in image and 'os_type' in image['properties']: + os_type = image['properties']['os_type'] if kernel_id is None: kernel_id = image.get('kernel_id', None) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py index 21f21b040d90..d6d964b953a0 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py @@ -34,7 +34,7 @@ instances_os_type = Column('os_type', String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, _warn_on_bytestring=False), - nullable=False) + nullable=True) def upgrade(migrate_engine): @@ -43,5 +43,3 @@ def upgrade(migrate_engine): meta.bind = migrate_engine instances.create_column(instances_os_type) - - diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py index 05bdd172e9fb..facd6efae724 100644 --- a/nova/tests/db/fakes.py +++ b/nova/tests/db/fakes.py @@ -62,6 +62,7 @@ def stub_out_db_instance_api(stubs): 'mac_address': values['mac_address'], 'vcpus': type_data['vcpus'], 'local_gb': type_data['local_gb'], + 'os_type': values['os_type'] } return FakeModel(base_options) diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index b9bb6d5b4ded..24a5698e5b24 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -18,6 +18,7 @@ Test suite for XenAPI """ +import functools import stubout from nova import db @@ -41,6 +42,21 
@@ from nova.tests.glance import stubs as glance_stubs FLAGS = flags.FLAGS +def stub_vm_utils_with_vdi_attached_here(function, should_return=True): + """ + vm_utils.with_vdi_attached_here needs to be stubbed out because it + calls down to the filesystem to attach a vdi. This provides a + decorator to handle that. + """ + @functools.wraps(function) + def decorated_function(self, *args, **kwargs): + orig_with_vdi_attached_here = vm_utils.with_vdi_attached_here + vm_utils.with_vdi_attached_here = lambda *x: should_return + function(self, *args, **kwargs) + vm_utils.with_vdi_attached_here = orig_with_vdi_attached_here + return decorated_function + + class XenAPIVolumeTestCase(test.TestCase): """ Unit tests for Volume operations @@ -62,6 +78,7 @@ class XenAPIVolumeTestCase(test.TestCase): 'ramdisk_id': 3, 'instance_type': 'm1.large', 'mac_address': 'aa:bb:cc:dd:ee:ff', + 'os_type': 'linux' } def _create_volume(self, size='0'): @@ -219,7 +236,7 @@ class XenAPIVMTestCase(test.TestCase): check() - def check_vm_record(self, conn): + def create_vm_record(self, conn, os_type): instances = conn.list_instances() self.assertEquals(instances, [1]) @@ -231,28 +248,63 @@ class XenAPIVMTestCase(test.TestCase): in xenapi_fake.get_all_records('VM').iteritems() if not rec['is_control_domain']] vm = vms[0] + self.vm_info = vm_info + self.vm = vm + def check_vm_record(self): # Check that m1.large above turned into the right thing. instance_type = instance_types.INSTANCE_TYPES['m1.large'] mem_kib = long(instance_type['memory_mb']) << 10 mem_bytes = str(mem_kib << 10) vcpus = instance_type['vcpus'] - self.assertEquals(vm_info['max_mem'], mem_kib) - self.assertEquals(vm_info['mem'], mem_kib) - self.assertEquals(vm['memory_static_max'], mem_bytes) - self.assertEquals(vm['memory_dynamic_max'], mem_bytes) - self.assertEquals(vm['memory_dynamic_min'], mem_bytes) - self.assertEquals(vm['VCPUs_max'], str(vcpus)) - self.assertEquals(vm['VCPUs_at_startup'], str(vcpus)) + self.assertEquals(self.vm_info['max_mem'], mem_kib) + self.assertEquals(self.vm_info['mem'], mem_kib) + self.assertEquals(self.vm['memory_static_max'], mem_bytes) + self.assertEquals(self.vm['memory_dynamic_max'], mem_bytes) + self.assertEquals(self.vm['memory_dynamic_min'], mem_bytes) + self.assertEquals(self.vm['VCPUs_max'], str(vcpus)) + self.assertEquals(self.vm['VCPUs_at_startup'], str(vcpus)) # Check that the VM is running according to Nova - self.assertEquals(vm_info['state'], power_state.RUNNING) + self.assertEquals(self.vm_info['state'], power_state.RUNNING) # Check that the VM is running according to XenAPI. 
- self.assertEquals(vm['power_state'], 'Running') + self.assertEquals(self.vm['power_state'], 'Running') + + def check_vm_params_for_windows(self): + self.assertEquals(self.vm['platform']['nx'], 'true') + self.assertEquals(self.vm['HVM_boot_params'], {'order': 'dc'}) + self.assertEquals(self.vm['HVM_boot_policy'], 'BIOS order') + + # check that these are not set + self.assertEquals(self.vm['PV_args'], '') + self.assertEquals(self.vm['PV_bootloader'], '') + self.assertEquals(self.vm['PV_kernel'], '') + self.assertEquals(self.vm['PV_ramdisk'], '') + + def check_vm_params_for_linux(self): + self.assertEquals(self.vm['platform']['nx'], 'false') + self.assertEquals(self.vm['PV_args'], 'clocksource=jiffies') + self.assertEquals(self.vm['PV_bootloader'], 'pygrub') + + # check that these are not set + self.assertEquals(self.vm['PV_kernel'], '') + self.assertEquals(self.vm['PV_ramdisk'], '') + self.assertEquals(self.vm['HVM_boot_params'], {}) + self.assertEquals(self.vm['HVM_boot_policy'], '') + + def check_vm_params_for_linux_with_external_kernel(self): + self.assertEquals(self.vm['platform']['nx'], 'false') + self.assertEquals(self.vm['PV_args'], 'root=/dev/xvda1') + self.assertNotEquals(self.vm['PV_kernel'], '') + self.assertNotEquals(self.vm['PV_ramdisk'], '') + + # check that these are not set + self.assertEquals(self.vm['HVM_boot_params'], {}) + self.assertEquals(self.vm['HVM_boot_policy'], '') def _test_spawn(self, image_id, kernel_id, ramdisk_id, - instance_type="m1.large"): + instance_type="m1.large", os_type="linux"): stubs.stubout_session(self.stubs, stubs.FakeSessionForVMTests) values = {'name': 1, 'id': 1, @@ -263,11 +315,13 @@ class XenAPIVMTestCase(test.TestCase): 'ramdisk_id': ramdisk_id, 'instance_type': instance_type, 'mac_address': 'aa:bb:cc:dd:ee:ff', + 'os_type': os_type } conn = xenapi_conn.get_connection(False) instance = db.instance_create(values) conn.spawn(instance) - self.check_vm_record(conn) + self.create_vm_record(conn, os_type) + self.check_vm_record() def test_spawn_not_enough_memory(self): FLAGS.xenapi_image_service = 'glance' @@ -283,24 +337,37 @@ class XenAPIVMTestCase(test.TestCase): FLAGS.xenapi_image_service = 'objectstore' self._test_spawn(1, 2, 3) + @stub_vm_utils_with_vdi_attached_here def test_spawn_raw_glance(self): FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_RAW, None, None) + self.check_vm_params_for_linux() - def test_spawn_vhd_glance(self): + def test_spawn_vhd_glance_linux(self): FLAGS.xenapi_image_service = 'glance' - self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None) + self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, + os_type="linux") + self.check_vm_params_for_linux() + + def test_spawn_vhd_glance_windows(self): + FLAGS.xenapi_image_service = 'glance' + self._test_spawn(glance_stubs.FakeGlance.IMAGE_VHD, None, None, + os_type="windows") + self.check_vm_params_for_windows() def test_spawn_glance(self): FLAGS.xenapi_image_service = 'glance' self._test_spawn(glance_stubs.FakeGlance.IMAGE_MACHINE, glance_stubs.FakeGlance.IMAGE_KERNEL, glance_stubs.FakeGlance.IMAGE_RAMDISK) + self.check_vm_params_for_linux_with_external_kernel() def tearDown(self): super(XenAPIVMTestCase, self).tearDown() self.manager.delete_project(self.project) self.manager.delete_user(self.user) + self.vm_info = None + self.vm = None self.stubs.UnsetAll() def _create_instance(self): @@ -314,7 +381,8 @@ class XenAPIVMTestCase(test.TestCase): 'kernel_id': 2, 'ramdisk_id': 3, 'instance_type': 'm1.large', - 
'mac_address': 'aa:bb:cc:dd:ee:ff'} + 'mac_address': 'aa:bb:cc:dd:ee:ff', + 'os_type': 'linux'} instance = db.instance_create(values) self.conn.spawn(instance) return instance @@ -360,6 +428,7 @@ class XenAPIDetermineDiskImageTestCase(test.TestCase): self.fake_instance = FakeInstance() self.fake_instance.id = 42 + self.fake_instance.os_type = 'linux' def assert_disk_type(self, disk_type): dt = vm_utils.VMHelper.determine_disk_image_type( diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9c0bb55790cd..a26e391df1b3 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -80,7 +80,8 @@ class VMHelper(HelperBase): """ @classmethod - def create_vm(cls, session, instance, kernel, ramdisk, use_pv_kernel=False): + def create_vm(cls, session, instance, kernel, ramdisk, + use_pv_kernel=False): """Create a VM record. Returns a Deferred that gives the new VM reference. the use_pv_kernel flag indicates whether the guest is HVM or PV @@ -319,7 +320,7 @@ class VMHelper(HelperBase): 'glance_host': FLAGS.glance_host, 'glance_port': FLAGS.glance_port, 'sr_path': get_sr_path(session), - 'os_type': instance.get('os_type', 'linux')} + 'os_type': instance.os_type} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'upload_vhd', kwargs) @@ -524,7 +525,7 @@ class VMHelper(HelperBase): Determine whether the VM will use a paravirtualized kernel or if it will use hardware virtualization. - 1. Objectstore (any image type): + 1. Objectstore (any image type): We use plugin to figure out whether the VDI uses PV 2. Glance (VHD): then we use `os_type`, raise if not set @@ -540,7 +541,8 @@ class VMHelper(HelperBase): session, vdi_ref, disk_image_type, os_type) else: # 1. Objecstore - return cls._determine_is_pv_objectstore(session, instance_id, vdi_ref) + return cls._determine_is_pv_objectstore(session, instance_id, + vdi_ref) @classmethod def _determine_is_pv_objectstore(cls, session, instance_id, vdi_ref): @@ -564,7 +566,7 @@ class VMHelper(HelperBase): """ For a Glance image, determine if we need paravirtualization. - The relevant scenarios are: + The relevant scenarios are: 2. Glance (VHD): then we use `os_type`, raise if not set 3. Glance (DISK_RAW): use Pygrub to figure out if pv kernel is @@ -582,7 +584,7 @@ class VMHelper(HelperBase): is_pv = True elif disk_image_type == ImageType.DISK_RAW: # 3. RAW - is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv) + is_pv = with_vdi_attached_here(session, vdi_ref, True, _is_vdi_pv) elif disk_image_type == ImageType.DISK: # 4. 
Disk
is_pv = True
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index 1edf39c5b7b7..eedb07a50944 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -87,8 +87,6 @@ class VMOps(object):
vdi_ref = self._session.call_xenapi('VDI.get_by_uuid', vdi_uuid)
- os_type = instance.get('os_type', 'linux')
-
kernel = None
if instance.kernel_id:
kernel = VMHelper.fetch_image(self._session, instance.id,
@@ -99,8 +97,8 @@
ramdisk = VMHelper.fetch_image(self._session, instance.id,
instance.ramdisk_id, user, project,
ImageType.KERNEL_RAMDISK)
- use_pv_kernel = VMHelper.determine_is_pv(
- self._session, instance.id, vdi_ref, disk_image_type, os_type)
+ use_pv_kernel = VMHelper.determine_is_pv(self._session, instance.id,
+ vdi_ref, disk_image_type, instance.os_type)
vm_ref = VMHelper.create_vm(self._session, instance, kernel, ramdisk,
use_pv_kernel)
@@ -242,7 +240,7 @@ class VMOps(object):
finally:
self._destroy(instance, template_vm_ref, shutdown=False,
destroy_kernel_ramdisk=False)
-
+
logging.debug(_("Finished snapshot and upload for VM %s"), instance)

def reboot(self, instance):
From e63cd9d5dc856f81477cf6c0e6c77ed7d1f4d70c Mon Sep 17 00:00:00 2001
From: Cory Wright
Date: Fri, 4 Mar 2011 22:17:53 +0000
Subject: [PATCH 22/76] * os_type is no longer `not null`

---
 ...o_instances.py => 009_add_os_type_to_instances.py} | 11 +++++++++++
 nova/virt/xenapi/vm_utils.py | 8 ++++----
 2 files changed, 15 insertions(+), 4 deletions(-)
 rename nova/db/sqlalchemy/migrate_repo/versions/{007_add_os_type_to_instances.py => 009_add_os_type_to_instances.py} (84%)

diff --git a/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_os_type_to_instances.py
similarity index 84%
rename from nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py
rename to nova/db/sqlalchemy/migrate_repo/versions/009_add_os_type_to_instances.py
index d6d964b953a0..a50f31e16bab 100644
--- a/nova/db/sqlalchemy/migrate_repo/versions/007_add_os_type_to_instances.py
+++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_os_type_to_instances.py
@@ -17,6 +17,7 @@
 # under the License.

 from sqlalchemy import *
+from sqlalchemy.sql import text
 from migrate import *
 from nova import log as logging
@@ -43,3 +44,13 @@ def upgrade(migrate_engine):
 meta.bind = migrate_engine

 instances.create_column(instances_os_type)
+ migrate_engine.execute(instances.update()\
+ .where(instances.c.os_type==None)\
+ .values(os_type='linux'))
+
+
+def downgrade(migrate_engine):
+ meta.bind = migrate_engine
+
+ instances.drop_column('os_type')
+
diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py
index 7bff81b66469..150824400fed 100644
--- a/nova/virt/xenapi/vm_utils.py
+++ b/nova/virt/xenapi/vm_utils.py
@@ -117,7 +117,6 @@ class VMHelper(HelperBase):
'memory_target': mem,
'name_description': '',
'name_label': instance.name,
-# 'other_config': {'allowvssprovider': False},
- 'other_config': {},
+ 'other_config': {'allowvssprovider': False},
'PCI_bus': '',
'platform': {'acpi': 'true', 'apic': 'true', 'pae': 'true',
@@ -313,14 +312,14 @@ class VMHelper(HelperBase):
logging.debug(_("Asking xapi to upload %(vdi_uuids)s as"
" ID %(image_id)s") % locals())
- # TODO(dubs): os_type is currently defaulting to linux, we actually
- want to make this a NOT NULL column and require it to be specified.
+ os_type = instance.os_type and instance.os_type or 'linux' + params = {'vdi_uuids': vdi_uuids, 'image_id': image_id, 'glance_host': FLAGS.glance_host, 'glance_port': FLAGS.glance_port, 'sr_path': get_sr_path(session), - 'os_type': instance.os_type} + 'os_type': os_type} kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'upload_vhd', kwargs) From 81fe66bb19d16c387705e144c9941096cc497cf0 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 7 Mar 2011 15:12:26 +0100 Subject: [PATCH 23/76] Make "dhcpbridge init" output correctly formatted leases information. --- bin/nova-dhcpbridge | 2 +- nova/network/linux_net.py | 30 +++++++++++++++++++++++++++--- 2 files changed, 28 insertions(+), 4 deletions(-) diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 3dd9de367393..7ef51feba615 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -94,7 +94,7 @@ def init_leases(interface): """Get the list of hosts for an interface.""" ctxt = context.get_admin_context() network_ref = db.network_get_by_bridge(ctxt, interface) - return linux_net.get_dhcp_hosts(ctxt, network_ref['id']) + return linux_net.get_dhcp_leases(ctxt, network_ref['id']) def main(): diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 535ce87bcb70..0bcc3608107b 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -18,6 +18,7 @@ Implements vlans, bridges, and iptables rules using linux utilities. """ import os +import time from nova import db from nova import exception @@ -56,6 +57,8 @@ flags.DEFINE_bool('use_nova_chains', False, 'use the nova_ routing chains instead of default') flags.DEFINE_string('input_chain', 'INPUT', 'chain to add nova_input to') +flags.DEFINE_integer('dhcp_lease_time', 120, + 'Lifetime of a DHCP lease') flags.DEFINE_string('dns_server', None, 'if set, uses specific dns server for dnsmasq') @@ -273,8 +276,17 @@ def ensure_bridge(bridge, interface, net_attrs=None): _confirm_rule("FORWARD", "-j nova-local") +def get_dhcp_leases(context, network_id): + """Return a network's hosts config in dnsmasq leasefile format""" + hosts = [] + for fixed_ip_ref in db.network_get_associated_fixed_ips(context, + network_id): + hosts.append(_host_lease(fixed_ip_ref)) + return '\n'.join(hosts) + + def get_dhcp_hosts(context, network_id): - """Get a string containing a network's hosts config in dnsmasq format""" + """Get a string containing a network's hosts config in dhcp-host format""" hosts = [] for fixed_ip_ref in db.network_get_associated_fixed_ips(context, network_id): @@ -365,8 +377,19 @@ interface %s utils.get_my_linklocal(network_ref['bridge'])}) +def _host_lease(fixed_ip_ref): + """Return a host string for an address in leasefile format""" + instance_ref = fixed_ip_ref['instance'] + timestamp = time.mktime(instance_ref['updated_at'].timetuple()) + + return "%d %s %s %s" % (timestamp + FLAGS.dhcp_lease_time, + instance_ref['mac_address'], + instance_ref['hostname'], + fixed_ip_ref['address']) + + def _host_dhcp(fixed_ip_ref): - """Return a host string for an address""" + """Return a host string for an address in dhcp-host format""" instance_ref = fixed_ip_ref['instance'] return "%s,%s.%s,%s" % (instance_ref['mac_address'], instance_ref['hostname'], @@ -420,7 +443,8 @@ def _dnsmasq_cmd(net): ' --pid-file=%s' % _dhcp_file(net['bridge'], 'pid'), ' --listen-address=%s' % net['gateway'], ' --except-interface=lo', - ' --dhcp-range=%s,static,120s' % net['dhcp_start'], + ' --dhcp-range=%s,static,%ds' % (net['dhcp_start'], + 
FLAGS.dhcp_lease_time), ' --dhcp-hostsfile=%s' % _dhcp_file(net['bridge'], 'conf'), ' --dhcp-script=%s' % FLAGS.dhcpbridge, ' --leasefile-ro'] From b8a0fdca4df454a4d60df40d06ebd82bcc2ba3da Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Tue, 8 Mar 2011 14:35:53 +0000 Subject: [PATCH 24/76] * pep8 cleanups in migrations * a few bugfixes --- .../migrate_repo/versions/009_add_os_type_to_instances.py | 5 +---- nova/tests/test_xenapi.py | 4 ++-- nova/virt/xenapi/vm_utils.py | 2 +- 3 files changed, 4 insertions(+), 7 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/009_add_os_type_to_instances.py index a50f31e16bab..514b92b813a4 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/009_add_os_type_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/009_add_os_type_to_instances.py @@ -29,8 +29,6 @@ instances = Table('instances', meta, Column('id', Integer(), primary_key=True, nullable=False), ) -# FIXME(dubs) should this be not null? Maybe create as nullable, then -# populate all existing rows with 'linux', then adding not null constraint. instances_os_type = Column('os_type', String(length=255, convert_unicode=False, assert_unicode=None, unicode_error=None, @@ -45,7 +43,7 @@ def upgrade(migrate_engine): instances.create_column(instances_os_type) migrate_engine.execute(instances.update()\ - .where(instances.c.os_type==None)\ + .where(instances.c.os_type == None)\ .values(os_type='linux')) @@ -53,4 +51,3 @@ def downgrade(migrate_engine): meta.bind = migrate_engine instances.drop_column('os_type') - diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index 27f0e5dd723e..25070e108d7c 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -251,7 +251,7 @@ class XenAPIVMTestCase(test.TestCase): self.vm_info = vm_info self.vm = vm - def check_vm_record(self): + def check_vm_record(self, conn): # Check that m1.large above turned into the right thing. instance_type = db.instance_type_get_by_name(conn, 'm1.large') mem_kib = long(instance_type['memory_mb']) << 10 @@ -321,7 +321,7 @@ class XenAPIVMTestCase(test.TestCase): instance = db.instance_create(values) conn.spawn(instance) self.create_vm_record(conn, os_type) - self.check_vm_record() + self.check_vm_record(conn) def test_spawn_not_enough_memory(self): FLAGS.xenapi_image_service = 'glance' diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 150824400fed..604e8a4e05fd 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -324,7 +324,7 @@ class VMHelper(HelperBase): kwargs = {'params': pickle.dumps(params)} task = session.async_call_plugin('glance', 'upload_vhd', kwargs) - session.wait_for_task(task, instance_id) + session.wait_for_task(task, instance.id) @classmethod def fetch_image(cls, session, instance_id, image, user, project, From cbc2956a4e863c1bc952c7cef6045c39d293818d Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Tue, 8 Mar 2011 17:18:13 +0000 Subject: [PATCH 25/76] Remove addition of account to service url. 
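In effect, X-Server-Management-Url no longer embeds an account segment, and novarc now exports a plain username rather than the old project:user form. Condensed from the auth.py hunk below, the token URL construction collapses from building

    # before: append the account id to the auth url, e.g.
    # http://foo.org:8774/baz/v1.0/myacct/
    os_url = '%s%s%s/' % (req.url,
                          '' if req.url.endswith('/') else '/',
                          account.id)

down to the bare versioned endpoint:

    # after: clients hit e.g. http://foo/v1.0/ directly
    os_url = req.url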
--- nova/api/openstack/__init__.py | 24 ++---------- nova/api/openstack/auth.py | 46 +++++----------------- nova/auth/novarc.template | 2 +- nova/tests/api/openstack/test_accounts.py | 8 ++-- nova/tests/api/openstack/test_adminapi.py | 4 +- nova/tests/api/openstack/test_auth.py | 2 +- nova/tests/api/openstack/test_flavors.py | 4 +- nova/tests/api/openstack/test_images.py | 4 +- nova/tests/api/openstack/test_servers.py | 48 +++++++++++------------ nova/tests/api/openstack/test_users.py | 10 ++--- nova/tests/api/openstack/test_zones.py | 10 ++--- 11 files changed, 59 insertions(+), 103 deletions(-) diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index 005d330a67c0..a655b1c85036 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -73,18 +73,6 @@ class APIRouter(wsgi.Router): def __init__(self): mapper = routes.Mapper() - accounts_controller = accounts.Controller() - mapper.connect("account", "/{id}", - controller=accounts_controller, action="show", - conditions=dict(method=["GET"])) - if FLAGS.allow_admin_api: - mapper.connect("/{id}", - controller=accounts_controller, action="update", - conditions=dict(method=["PUT"])) - mapper.connect("/{id}", - controller=accounts_controller, action="delete", - conditions=dict(method=["DELETE"])) - server_members = {'action': 'POST'} if FLAGS.allow_admin_api: LOG.debug(_("Including admin operations in API.")) @@ -101,38 +89,34 @@ class APIRouter(wsgi.Router): server_members['inject_network_info'] = 'POST' mapper.resource("zone", "zones", controller=zones.Controller(), - path_prefix="{account_id}/", collection={'detail': 'GET'}) mapper.resource("user", "users", controller=users.Controller(), - path_prefix="{account_id}/", collection={'detail': 'GET'}) + mapper.resource("account", "accounts", + controller=accounts.Controller(), + collection={'detail': 'GET'}) + mapper.resource("server", "servers", controller=servers.Controller(), collection={'detail': 'GET'}, - path_prefix="{account_id}/", member=server_members) mapper.resource("backup_schedule", "backup_schedule", controller=backup_schedules.Controller(), - path_prefix="{account_id}/servers/{server_id}/", parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("console", "consoles", controller=consoles.Controller(), - path_prefix="{account_id}/servers/{server_id}/", parent_resource=dict(member_name='server', collection_name='servers')) mapper.resource("image", "images", controller=images.Controller(), - path_prefix="{account_id}/", collection={'detail': 'GET'}) mapper.resource("flavor", "flavors", controller=flavors.Controller(), - path_prefix="{account_id}/", collection={'detail': 'GET'}) mapper.resource("shared_ip_group", "shared_ip_groups", - path_prefix="{account_id}/", collection={'detail': 'GET'}, controller=shared_ip_groups.Controller()) diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index e77910fed1e6..e71fc69e3257 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -53,19 +53,15 @@ class AuthMiddleware(wsgi.Middleware): if not self.has_authentication(req): return self.authenticate(req) user = self.get_user_by_authentication(req) - account_name = req.path_info_peek() - + accounts = self.auth.get_projects(user=user) if not user: return faults.Fault(webob.exc.HTTPUnauthorized()) - if not account_name: - if self.auth.is_admin(user): - account_name = FLAGS.default_project - else: - return faults.Fault(webob.exc.HTTPUnauthorized()) - try: - account = 
self.auth.get_project(account_name) - except exception.NotFound: + if accounts: + #we are punting on this til auth is settled, + #and possibly til api v1.1 (mdragon) + account = accounts[0] + else: return faults.Fault(webob.exc.HTTPUnauthorized()) if not self.auth.is_admin(user) and \ @@ -85,7 +81,6 @@ class AuthMiddleware(wsgi.Middleware): # Unless the request is explicitly made against // don't # honor it path_info = req.path_info - account_name = None if len(path_info) > 1: return faults.Fault(webob.exc.HTTPUnauthorized()) @@ -95,10 +90,7 @@ class AuthMiddleware(wsgi.Middleware): except KeyError: return faults.Fault(webob.exc.HTTPUnauthorized()) - if ':' in username: - account_name, username = username.rsplit(':', 1) - - token, user = self._authorize_user(username, account_name, key, req) + token, user = self._authorize_user(username, key, req) if user and token: res = webob.Response() res.headers['X-Auth-Token'] = token.token_hash @@ -135,31 +127,15 @@ class AuthMiddleware(wsgi.Middleware): return self.auth.get_user(token.user_id) return None - def _authorize_user(self, username, account_name, key, req): + def _authorize_user(self, username, key, req): """Generates a new token and assigns it to a user. username - string - account_name - string key - string API key req - webob.Request object """ ctxt = context.get_admin_context() user = self.auth.get_user_from_access_key(key) - if account_name: - try: - account = self.auth.get_project(account_name) - except exception.NotFound: - return None, None - else: - # (dragondm) punt and try to determine account. - # this is something of a hack, but a user on 1 account is a - # common case, and is the way the current RS code works. - accounts = self.auth.get_projects(user=user) - if len(accounts) == 1: - account = accounts[0] - else: - #we can't tell what account they are logging in for. - return None, None if user and user.name == username: token_hash = hashlib.sha1('%s%s%f' % (username, key, @@ -167,11 +143,7 @@ class AuthMiddleware(wsgi.Middleware): token_dict = {} token_dict['token_hash'] = token_hash token_dict['cdn_management_url'] = '' - # auth url + project (account) id, e.g. 
- # http://foo.org:8774/baz/v1.0/myacct/ - os_url = '%s%s%s/' % (req.url, - '' if req.url.endswith('/') else '/', - account.id) + os_url = req.url token_dict['server_management_url'] = os_url token_dict['storage_url'] = '' token_dict['user_id'] = user.id diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 1c917ad44cec..cda2ecc28b68 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -11,5 +11,5 @@ export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this se alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" export NOVA_API_KEY="%(access)s" -export NOVA_USERNAME="%(project)s:%(user)s" +export NOVA_USERNAME="%(user)s" export NOVA_URL="%(os)s" diff --git a/nova/tests/api/openstack/test_accounts.py b/nova/tests/api/openstack/test_accounts.py index b2e89824add7..746f02f570ed 100644 --- a/nova/tests/api/openstack/test_accounts.py +++ b/nova/tests/api/openstack/test_accounts.py @@ -70,7 +70,7 @@ class AccountsTest(test.TestCase): super(AccountsTest, self).tearDown() def test_get_account(self): - req = webob.Request.blank('/v1.0/test1') + req = webob.Request.blank('/v1.0/accounts/test1') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -80,7 +80,7 @@ class AccountsTest(test.TestCase): self.assertEqual(res.status_int, 200) def test_account_delete(self): - req = webob.Request.blank('/v1.0/test1') + req = webob.Request.blank('/v1.0/accounts/test1') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) self.assertTrue('test1' not in fakes.FakeAuthManager.projects) @@ -89,7 +89,7 @@ class AccountsTest(test.TestCase): def test_account_create(self): body = dict(account=dict(description='test account', manager='guy1')) - req = webob.Request.blank('/v1.0/newacct') + req = webob.Request.blank('/v1.0/accounts/newacct') req.method = 'PUT' req.body = json.dumps(body) @@ -108,7 +108,7 @@ class AccountsTest(test.TestCase): def test_account_update(self): body = dict(account=dict(description='test account', manager='guy2')) - req = webob.Request.blank('/v1.0/test1') + req = webob.Request.blank('/v1.0/accounts/test1') req.method = 'PUT' req.body = json.dumps(body) diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py index 7cb9e8450932..4568cb9f5c0e 100644 --- a/nova/tests/api/openstack/test_adminapi.py +++ b/nova/tests/api/openstack/test_adminapi.py @@ -50,7 +50,7 @@ class AdminAPITest(test.TestCase): def test_admin_enabled(self): FLAGS.allow_admin_api = True # We should still be able to access public operations. - req = webob.Request.blank('/v1.0/testacct/flavors') + req = webob.Request.blank('/v1.0/flavors') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) # TODO: Confirm admin operations are available. @@ -58,7 +58,7 @@ class AdminAPITest(test.TestCase): def test_admin_disabled(self): FLAGS.allow_admin_api = False # We should still be able to access public operations. - req = webob.Request.blank('/v1.0/testacct/flavors') + req = webob.Request.blank('/v1.0/flavors') res = req.get_response(fakes.wsgi_app()) # TODO: Confirm admin operations are unavailable. 
self.assertEqual(res.status_int, 200) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 8268a6fb9915..49f90879d7ef 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -78,7 +78,7 @@ class Test(test.TestCase): self.assertEqual(result.status, '204 No Content') self.assertEqual(len(result.headers['X-Auth-Token']), 40) self.assertEqual(result.headers['X-Server-Management-Url'], - "http://foo/v1.0/test/") + "http://foo/v1.0/") self.assertEqual(result.headers['X-CDN-Management-Url'], "") self.assertEqual(result.headers['X-Storage-Url'], "") diff --git a/nova/tests/api/openstack/test_flavors.py b/nova/tests/api/openstack/test_flavors.py index ba0785b0ea68..8280a505fa65 100644 --- a/nova/tests/api/openstack/test_flavors.py +++ b/nova/tests/api/openstack/test_flavors.py @@ -42,11 +42,11 @@ class FlavorsTest(test.TestCase): super(FlavorsTest, self).tearDown() def test_get_flavor_list(self): - req = webob.Request.blank('/v1.0/testacct/flavors') + req = webob.Request.blank('/v1.0/flavors') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) def test_get_flavor_by_id(self): - req = webob.Request.blank('/v1.0/testacct/flavors/1') + req = webob.Request.blank('/v1.0/flavors/1') res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) diff --git a/nova/tests/api/openstack/test_images.py b/nova/tests/api/openstack/test_images.py index 819ca001eaa0..dbe507f7d08d 100644 --- a/nova/tests/api/openstack/test_images.py +++ b/nova/tests/api/openstack/test_images.py @@ -216,7 +216,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): super(ImageControllerWithGlanceServiceTest, self).tearDown() def test_get_image_index(self): - req = webob.Request.blank('/v1.0/testacct/images') + req = webob.Request.blank('/v1.0/images') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -228,7 +228,7 @@ class ImageControllerWithGlanceServiceTest(test.TestCase): "image %s not in fixture index!" 
% str(image)) def test_get_image_details(self): - req = webob.Request.blank('/v1.0/testacct/images/detail') + req = webob.Request.blank('/v1.0/images/detail') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) diff --git a/nova/tests/api/openstack/test_servers.py b/nova/tests/api/openstack/test_servers.py index d592e06b0af3..705a2f8005d0 100644 --- a/nova/tests/api/openstack/test_servers.py +++ b/nova/tests/api/openstack/test_servers.py @@ -150,7 +150,7 @@ class ServersTest(test.TestCase): super(ServersTest, self).tearDown() def test_get_server_by_id(self): - req = webob.Request.blank('/v1.0/testacct/servers/1') + req = webob.Request.blank('/v1.0/servers/1') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(res_dict['server']['id'], '1') @@ -161,7 +161,7 @@ class ServersTest(test.TestCase): public = ["1.2.3.4"] new_return_server = return_server_with_addresses(private, public) self.stubs.Set(nova.db.api, 'instance_get', new_return_server) - req = webob.Request.blank('/v1.0/testacct/servers/1') + req = webob.Request.blank('/v1.0/servers/1') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) self.assertEqual(res_dict['server']['id'], '1') @@ -173,7 +173,7 @@ class ServersTest(test.TestCase): self.assertEqual(addresses["private"][0], private) def test_get_server_list(self): - req = webob.Request.blank('/v1.0/testacct/servers') + req = webob.Request.blank('/v1.0/servers') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -224,7 +224,7 @@ class ServersTest(test.TestCase): name='server_test', imageId=2, flavorId=2, metadata={'hello': 'world', 'open': 'stack'}, personality={})) - req = webob.Request.blank('/v1.0/testacct/servers') + req = webob.Request.blank('/v1.0/servers') req.method = 'POST' req.body = json.dumps(body) @@ -233,7 +233,7 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 200) def test_update_no_body(self): - req = webob.Request.blank('/v1.0/testacct/servers/1') + req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 422) @@ -251,7 +251,7 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.db.api, 'instance_update', server_update) - req = webob.Request.blank('/v1.0/testacct/servers/1') + req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' req.body = self.body req.get_response(fakes.wsgi_app()) @@ -267,30 +267,30 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.db.api, 'instance_update', server_update) - req = webob.Request.blank('/v1.0/testacct/servers/1') + req = webob.Request.blank('/v1.0/servers/1') req.method = 'PUT' req.body = self.body req.get_response(fakes.wsgi_app()) def test_create_backup_schedules(self): - req = webob.Request.blank('/v1.0/testacct/servers/1/backup_schedules') + req = webob.Request.blank('/v1.0/servers/1/backup_schedules') req.method = 'POST' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '404 Not Found') def test_delete_backup_schedules(self): - req = webob.Request.blank('/v1.0/testacct/servers/1/backup_schedules') + req = webob.Request.blank('/v1.0/servers/1/backup_schedules') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '404 Not Found') def test_get_server_backup_schedules(self): - req = webob.Request.blank('/v1.0/testacct/servers/1/backup_schedules') + req = webob.Request.blank('/v1.0/servers/1/backup_schedules') res = 
req.get_response(fakes.wsgi_app()) self.assertEqual(res.status, '404 Not Found') def test_get_all_server_details(self): - req = webob.Request.blank('/v1.0/testacct/servers/detail') + req = webob.Request.blank('/v1.0/servers/detail') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -321,7 +321,7 @@ class ServersTest(test.TestCase): self.stubs.Set(nova.db.api, 'instance_get_all_by_user', return_servers_with_host) - req = webob.Request.blank('/v1.0/testacct/servers/detail') + req = webob.Request.blank('/v1.0/servers/detail') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -341,7 +341,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/testacct/servers/1/pause') + req = webob.Request.blank('/v1.0/servers/1/pause') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -353,7 +353,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/testacct/servers/1/unpause') + req = webob.Request.blank('/v1.0/servers/1/unpause') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -365,7 +365,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/testacct/servers/1/suspend') + req = webob.Request.blank('/v1.0/servers/1/suspend') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -377,7 +377,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/testacct/servers/1/resume') + req = webob.Request.blank('/v1.0/servers/1/resume') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -389,7 +389,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/testacct/servers/1/reset_network') + req = webob.Request.blank('/v1.0/servers/1/reset_network') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -402,7 +402,7 @@ class ServersTest(test.TestCase): name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) req = webob.Request.blank( - '/v1.0/testacct/servers/1/inject_network_info') + '/v1.0/servers/1/inject_network_info') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -410,13 +410,13 @@ class ServersTest(test.TestCase): self.assertEqual(res.status_int, 202) def test_server_diagnostics(self): - req = webob.Request.blank("/v1.0/testacct/servers/1/diagnostics") + req = webob.Request.blank("/v1.0/servers/1/diagnostics") req.method = "GET" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 404) def test_server_actions(self): - req = webob.Request.blank("/v1.0/testacct/servers/1/actions") + req = webob.Request.blank("/v1.0/servers/1/actions") req.method = "GET" res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 404) @@ -425,7 +425,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = 
webob.Request.blank('/v1.0/testacct/servers/1/action') + req = webob.Request.blank('/v1.0/servers/1/action') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -435,7 +435,7 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/testacct/servers/1/action') + req = webob.Request.blank('/v1.0/servers/1/action') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) @@ -445,14 +445,14 @@ class ServersTest(test.TestCase): body = dict(server=dict( name='server_test', imageId=2, flavorId=2, metadata={}, personality={})) - req = webob.Request.blank('/v1.0/testacct/servers/1/action') + req = webob.Request.blank('/v1.0/servers/1/action') req.method = 'POST' req.content_type = 'application/json' req.body = json.dumps(body) res = req.get_response(fakes.wsgi_app()) def test_delete_server_instance(self): - req = webob.Request.blank('/v1.0/testacct/servers/1') + req = webob.Request.blank('/v1.0/servers/1') req.method = 'DELETE' self.server_delete_called = False diff --git a/nova/tests/api/openstack/test_users.py b/nova/tests/api/openstack/test_users.py index bd32254cd50a..14c7897f07e5 100644 --- a/nova/tests/api/openstack/test_users.py +++ b/nova/tests/api/openstack/test_users.py @@ -72,7 +72,7 @@ class UsersTest(test.TestCase): super(UsersTest, self).tearDown() def test_get_user_list(self): - req = webob.Request.blank('/v1.0/testacct/users') + req = webob.Request.blank('/v1.0/users') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -80,7 +80,7 @@ class UsersTest(test.TestCase): self.assertEqual(len(res_dict['users']), 2) def test_get_user_by_id(self): - req = webob.Request.blank('/v1.0/testacct/users/guy2') + req = webob.Request.blank('/v1.0/users/guy2') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -91,7 +91,7 @@ class UsersTest(test.TestCase): self.assertEqual(res.status_int, 200) def test_user_delete(self): - req = webob.Request.blank('/v1.0/testacct/users/guy1') + req = webob.Request.blank('/v1.0/users/guy1') req.method = 'DELETE' res = req.get_response(fakes.wsgi_app()) self.assertTrue('guy1' not in [u.id for u in @@ -103,7 +103,7 @@ class UsersTest(test.TestCase): access='acc3', secret='invasionIsInNormandy', admin=True)) - req = webob.Request.blank('/v1.0/testacct/users') + req = webob.Request.blank('/v1.0/users') req.method = 'POST' req.body = json.dumps(body) @@ -124,7 +124,7 @@ class UsersTest(test.TestCase): body = dict(user=dict(name='guy2', access='acc2', secret='invasionIsInNormandy')) - req = webob.Request.blank('/v1.0/testacct/users/guy2') + req = webob.Request.blank('/v1.0/users/guy2') req.method = 'PUT' req.body = json.dumps(body) diff --git a/nova/tests/api/openstack/test_zones.py b/nova/tests/api/openstack/test_zones.py index 51f13af48c80..6d869dc15e7d 100644 --- a/nova/tests/api/openstack/test_zones.py +++ b/nova/tests/api/openstack/test_zones.py @@ -85,7 +85,7 @@ class ZonesTest(test.TestCase): super(ZonesTest, self).tearDown() def test_get_zone_list(self): - req = webob.Request.blank('/v1.0/testacct/zones') + req = webob.Request.blank('/v1.0/zones') res = req.get_response(fakes.wsgi_app()) res_dict = json.loads(res.body) @@ -93,7 +93,7 @@ class ZonesTest(test.TestCase): self.assertEqual(len(res_dict['zones']), 2) def test_get_zone_by_id(self): - req = webob.Request.blank('/v1.0/testacct/zones/1') + req = webob.Request.blank('/v1.0/zones/1') 
res = req.get_response(fakes.wsgi_app())
res_dict = json.loads(res.body)
@@ -103,7 +103,7 @@ class ZonesTest(test.TestCase):
self.assertEqual(res.status_int, 200)

def test_zone_delete(self):
- req = webob.Request.blank('/v1.0/testacct/zones/1')
+ req = webob.Request.blank('/v1.0/zones/1')
res = req.get_response(fakes.wsgi_app())
self.assertEqual(res.status_int, 200)
@@ -111,7 +111,7 @@
def test_zone_create(self):
body = dict(zone=dict(api_url='http://blah.zoo', username='fred',
password='fubar'))
- req = webob.Request.blank('/v1.0/testacct/zones')
+ req = webob.Request.blank('/v1.0/zones')
req.method = 'POST'
req.body = json.dumps(body)
@@ -125,7 +125,7 @@
def test_zone_update(self):
body = dict(zone=dict(username='zeb', password='sneaky'))
- req = webob.Request.blank('/v1.0/testacct/zones/1')
+ req = webob.Request.blank('/v1.0/zones/1')
req.method = 'PUT'
req.body = json.dumps(body)
From e4b176d41cca234082c28ba6d9188745f1d2b98a Mon Sep 17 00:00:00 2001
From: Cory Wright
Date: Wed, 9 Mar 2011 00:49:56 +0000
Subject: [PATCH 26/76] a few fixes for the tests

---
 ..._os_type_to_instances.py => 010_add_os_type_to_instances.py} | 0
 nova/tests/test_xenapi.py | 1 +
 nova/virt/xenapi/vmops.py | 2 +-
 3 files changed, 2 insertions(+), 1 deletion(-)
 rename nova/db/sqlalchemy/migrate_repo/versions/{009_add_os_type_to_instances.py => 010_add_os_type_to_instances.py} (100%)

diff --git a/nova/db/sqlalchemy/migrate_repo/versions/009_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
similarity index 100%
rename from nova/db/sqlalchemy/migrate_repo/versions/009_add_os_type_to_instances.py
rename to nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py
diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py
index 383819b0087f..cd125a3019a0 100644
--- a/nova/tests/test_xenapi.py
+++ b/nova/tests/test_xenapi.py
@@ -440,6 +440,7 @@ class XenAPIMigrateInstance(test.TestCase):
'ramdisk_id': None,
'instance_type': 'm1.large',
'mac_address': 'aa:bb:cc:dd:ee:ff',
+ 'os_type': 'linux'
}
stubs.stub_out_migration_methods(self.stubs)
glance_stubs.stubout_glance_client(self.stubs,
diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py
index a6a9fbf955ac..aa4372c3d373 100644
--- a/nova/virt/xenapi/vmops.py
+++ b/nova/virt/xenapi/vmops.py
@@ -261,7 +261,7 @@ class VMOps(object):
template_vm_ref, template_vdi_uuids = self._get_snapshot(instance)
# call plugin to ship snapshot off to glance
VMHelper.upload_image(
- self._session, instance.id, template_vdi_uuids, image_id)
+ self._session, instance, template_vdi_uuids, image_id)
finally:
if template_vm_ref:
self._destroy(instance, template_vm_ref,
From 429fdb1ee733a62052c67f4e42c62447fc716ec0 Mon Sep 17 00:00:00 2001
From: Monsyne Dragon
Date: Wed, 9 Mar 2011 18:10:45 +0000
Subject: [PATCH 27/76] removed unneeded **kw args left over from removed
 account-in-url changes.
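The catch-alls existed because nova's WSGI router hands every variable matched in the URL route to the controller action as a keyword argument, so while the routes carried a "{account_id}/" path prefix each action needed **kw to absorb it. A rough sketch of the dispatch mechanics (illustrative handler, not part of this diff):

    # with path_prefix="{account_id}/", GET /v1.0/testacct/flavors
    # dispatched roughly as controller.index(req, account_id='testacct')
    def index(self, req, **kw):
        return dict(flavors=[])

    # with the prefix gone nothing extra is passed, so the explicit
    # signature suffices, and an unexpected kwarg now fails loudly
    # instead of being silently swallowed:
    def index(self, req):
        return dict(flavors=[])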
--- nova/api/openstack/backup_schedules.py | 6 ++-- nova/api/openstack/consoles.py | 10 +++--- nova/api/openstack/flavors.py | 6 ++-- nova/api/openstack/images.py | 12 ++++---- nova/api/openstack/servers.py | 42 +++++++++++++------------- nova/api/openstack/shared_ip_groups.py | 12 ++++---- nova/api/openstack/users.py | 12 ++++---- nova/api/openstack/zones.py | 12 ++++---- 8 files changed, 56 insertions(+), 56 deletions(-) diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index a4d5939df6ee..7abb5f884d46 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -40,15 +40,15 @@ class Controller(wsgi.Controller): def __init__(self): pass - def index(self, req, server_id, **kw): + def index(self, req, server_id): """ Returns the list of backup schedules for a given instance """ return _translate_keys({}) - def create(self, req, server_id, **kw): + def create(self, req, server_id): """ No actual update method required, since the existing API allows both create and update through a POST """ return faults.Fault(exc.HTTPNotImplemented()) - def delete(self, req, server_id, id, **kw): + def delete(self, req, server_id, id): """ Deletes an existing backup schedule """ return faults.Fault(exc.HTTPNotImplemented()) diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py index 85b2a4140621..9ebdbe710ad6 100644 --- a/nova/api/openstack/consoles.py +++ b/nova/api/openstack/consoles.py @@ -55,7 +55,7 @@ class Controller(wsgi.Controller): self.console_api = console.API() super(Controller, self).__init__() - def index(self, req, server_id, **kw): + def index(self, req, server_id): """Returns a list of consoles for this instance""" consoles = self.console_api.get_consoles( req.environ['nova.context'], @@ -63,14 +63,14 @@ class Controller(wsgi.Controller): return dict(consoles=[_translate_keys(console) for console in consoles]) - def create(self, req, server_id, **kw): + def create(self, req, server_id): """Creates a new console""" #info = self._deserialize(req.body, req) self.console_api.create_console( req.environ['nova.context'], int(server_id)) - def show(self, req, server_id, id, **kw): + def show(self, req, server_id, id): """Shows in-depth information on a specific console""" try: console = self.console_api.get_console( @@ -81,11 +81,11 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return _translate_detail_keys(console) - def update(self, req, server_id, id, **kw): + def update(self, req, server_id, id): """You can't update a console""" raise faults.Fault(exc.HTTPNotImplemented()) - def delete(self, req, server_id, id, **kw): + def delete(self, req, server_id, id): """Deletes a console""" try: self.console_api.delete_console(req.environ['nova.context'], diff --git a/nova/api/openstack/flavors.py b/nova/api/openstack/flavors.py index 1de67328bfc0..f3d040ba3bf5 100644 --- a/nova/api/openstack/flavors.py +++ b/nova/api/openstack/flavors.py @@ -34,17 +34,17 @@ class Controller(wsgi.Controller): "attributes": { "flavor": ["id", "name", "ram", "disk"]}}} - def index(self, req, **kw): + def index(self, req): """Return all flavors in brief.""" return dict(flavors=[dict(id=flavor['id'], name=flavor['name']) for flavor in self.detail(req)['flavors']]) - def detail(self, req, **kw): + def detail(self, req): """Return all flavors in detail.""" items = [self.show(req, id)['flavor'] for id in self._all_ids(req)] return dict(flavors=items) - def show(self, req, id, **kw): + def 
show(self, req, id): """Return data about the given flavor id.""" ctxt = req.environ['nova.context'] values = db.instance_type_get_by_flavor_id(ctxt, id) diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 5bc5b9978767..cf85a496f017 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -115,14 +115,14 @@ class Controller(wsgi.Controller): def __init__(self): self._service = utils.import_object(FLAGS.image_service) - def index(self, req, **kw): + def index(self, req): """Return all public images in brief""" items = self._service.index(req.environ['nova.context']) items = common.limited(items, req) items = [_filter_keys(item, ('id', 'name')) for item in items] return dict(images=items) - def detail(self, req, **kw): + def detail(self, req): """Return all public images in detail""" try: items = self._service.detail(req.environ['nova.context']) @@ -136,7 +136,7 @@ class Controller(wsgi.Controller): items = [_translate_status(item) for item in items] return dict(images=items) - def show(self, req, id, **kw): + def show(self, req, id): """Return data about the given image id""" image_id = common.get_image_id_from_image_hash(self._service, req.environ['nova.context'], id) @@ -145,11 +145,11 @@ class Controller(wsgi.Controller): _convert_image_id_to_hash(image) return dict(image=image) - def delete(self, req, id, **kw): + def delete(self, req, id): # Only public images are supported for now. raise faults.Fault(exc.HTTPNotFound()) - def create(self, req, **kw): + def create(self, req): context = req.environ['nova.context'] env = self._deserialize(req.body, req) instance_id = env["image"]["serverId"] @@ -160,7 +160,7 @@ class Controller(wsgi.Controller): return dict(image=image_meta) - def update(self, req, id, **kw): + def update(self, req, id): # Users may not modify public images, and that's all that # we support for now. 
raise faults.Fault(exc.HTTPNotFound()) diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index 54060c2bbc5b..c2bf42b7239b 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -105,11 +105,11 @@ class Controller(wsgi.Controller): self._image_service = utils.import_object(FLAGS.image_service) super(Controller, self).__init__() - def index(self, req, **kw): + def index(self, req): """ Returns a list of server names and ids for a given user """ return self._items(req, entity_maker=_translate_keys) - def detail(self, req, **kw): + def detail(self, req): """ Returns a list of server details for a given user """ return self._items(req, entity_maker=_translate_detail_keys) @@ -123,7 +123,7 @@ class Controller(wsgi.Controller): res = [entity_maker(inst)['server'] for inst in limited_list] return dict(servers=res) - def show(self, req, id, **kw): + def show(self, req, id): """ Returns server details by server id """ try: instance = self.compute_api.get(req.environ['nova.context'], id) @@ -131,7 +131,7 @@ class Controller(wsgi.Controller): except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) - def delete(self, req, id, **kw): + def delete(self, req, id): """ Destroys a server """ try: self.compute_api.delete(req.environ['nova.context'], id) @@ -139,7 +139,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() - def create(self, req, **kw): + def create(self, req): """ Creates a new server for a given user """ env = self._deserialize(req.body, req) if not env: @@ -180,7 +180,7 @@ class Controller(wsgi.Controller): onset_files=env.get('onset_files', [])) return _translate_keys(instances[0]) - def update(self, req, id, **kw): + def update(self, req, id): """ Updates the server name or password """ inst_dict = self._deserialize(req.body, req) if not inst_dict: @@ -202,7 +202,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPNoContent() - def action(self, req, id, **kw): + def action(self, req, id): """Multi-purpose method used to reboot, rebuild, or resize a server""" @@ -267,7 +267,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def lock(self, req, id, **kw): + def lock(self, req, id): """ lock the instance with id admin only operation @@ -282,7 +282,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def unlock(self, req, id, **kw): + def unlock(self, req, id): """ unlock the instance with id admin only operation @@ -297,7 +297,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def get_lock(self, req, id, **kw): + def get_lock(self, req, id): """ return the boolean state of (instance with id)'s lock @@ -311,7 +311,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def reset_network(self, req, id, **kw): + def reset_network(self, req, id): """ Reset networking on an instance (admin only). @@ -325,7 +325,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def inject_network_info(self, req, id, **kw): + def inject_network_info(self, req, id): """ Inject network info for an instance (admin only). 
@@ -339,7 +339,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def pause(self, req, id, **kw): + def pause(self, req, id): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] try: @@ -350,7 +350,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def unpause(self, req, id, **kw): + def unpause(self, req, id): """ Permit Admins to Unpause the server. """ ctxt = req.environ['nova.context'] try: @@ -361,7 +361,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def suspend(self, req, id, **kw): + def suspend(self, req, id): """permit admins to suspend the server""" context = req.environ['nova.context'] try: @@ -372,7 +372,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def resume(self, req, id, **kw): + def resume(self, req, id): """permit admins to resume the server from suspend""" context = req.environ['nova.context'] try: @@ -383,7 +383,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def rescue(self, req, id, **kw): + def rescue(self, req, id): """Permit users to rescue the server.""" context = req.environ["nova.context"] try: @@ -394,7 +394,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def unrescue(self, req, id, **kw): + def unrescue(self, req, id): """Permit users to unrescue the server.""" context = req.environ["nova.context"] try: @@ -405,7 +405,7 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() - def get_ajax_console(self, req, id, **kw): + def get_ajax_console(self, req, id): """ Returns a url to an instance's ajaxterm console. 
""" try: self.compute_api.get_ajax_console(req.environ['nova.context'], @@ -414,12 +414,12 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() - def diagnostics(self, req, id, **kw): + def diagnostics(self, req, id): """Permit Admins to retrieve server diagnostics.""" ctxt = req.environ["nova.context"] return self.compute_api.get_diagnostics(ctxt, id) - def actions(self, req, id, **kw): + def actions(self, req, id): """Permit Admins to retrieve server actions.""" ctxt = req.environ["nova.context"] items = self.compute_api.get_actions(ctxt, id) diff --git a/nova/api/openstack/shared_ip_groups.py b/nova/api/openstack/shared_ip_groups.py index e3c9177494d6..5d78f93777db 100644 --- a/nova/api/openstack/shared_ip_groups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -40,26 +40,26 @@ class Controller(wsgi.Controller): 'attributes': { 'sharedIpGroup': []}}} - def index(self, req, **kw): + def index(self, req): """ Returns a list of Shared IP Groups for the user """ return dict(sharedIpGroups=[]) - def show(self, req, id, **kw): + def show(self, req, id): """ Shows in-depth information on a specific Shared IP Group """ return _translate_keys({}) - def update(self, req, id, **kw): + def update(self, req, id): """ You can't update a Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) - def delete(self, req, id, **kw): + def delete(self, req, id): """ Deletes a Shared IP Group """ raise faults.Fault(exc.HTTPNotImplemented()) - def detail(self, req, **kw): + def detail(self, req): """ Returns a complete list of Shared IP Groups """ return _translate_detail_keys({}) - def create(self, req, **kw): + def create(self, req): """ Creates a new Shared IP group """ raise faults.Fault(exc.HTTPNotImplemented()) diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index ae3bf77918d2..83ebec964cdf 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -50,28 +50,28 @@ class Controller(wsgi.Controller): if not context.is_admin: raise exception.NotAuthorized(_("Not admin user")) - def index(self, req, **kw): + def index(self, req): """Return all users in brief""" users = self.manager.get_users() users = common.limited(users, req) users = [_translate_keys(user) for user in users] return dict(users=users) - def detail(self, req, **kw): + def detail(self, req): """Return all users in detail""" return self.index(req) - def show(self, req, id, **kw): + def show(self, req, id): """Return data about the given user id""" user = self.manager.get_user(id) return dict(user=_translate_keys(user)) - def delete(self, req, id, **kw): + def delete(self, req, id): self._check_admin(req.environ['nova.context']) self.manager.delete_user(id) return {} - def create(self, req, **kw): + def create(self, req): self._check_admin(req.environ['nova.context']) env = self._deserialize(req.body, req) is_admin = env['user'].get('admin') in ('T', 'True', True) @@ -81,7 +81,7 @@ class Controller(wsgi.Controller): user = self.manager.create_user(name, access, secret, is_admin) return dict(user=_translate_keys(user)) - def update(self, req, id, **kw): + def update(self, req, id): self._check_admin(req.environ['nova.context']) env = self._deserialize(req.body, req) is_admin = env['user'].get('admin') diff --git a/nova/api/openstack/zones.py b/nova/api/openstack/zones.py index 30bf2b67bc2b..d5206da20927 100644 --- a/nova/api/openstack/zones.py +++ b/nova/api/openstack/zones.py @@ -43,35 +43,35 @@ class Controller(wsgi.Controller): 
"attributes": { "zone": ["id", "api_url"]}}} - def index(self, req, **kw): + def index(self, req): """Return all zones in brief""" items = db.zone_get_all(req.environ['nova.context']) items = common.limited(items, req) items = [_scrub_zone(item) for item in items] return dict(zones=items) - def detail(self, req, **kw): + def detail(self, req): """Return all zones in detail""" return self.index(req) - def show(self, req, id, **kw): + def show(self, req, id): """Return data about the given zone id""" zone_id = int(id) zone = db.zone_get(req.environ['nova.context'], zone_id) return dict(zone=_scrub_zone(zone)) - def delete(self, req, id, **kw): + def delete(self, req, id): zone_id = int(id) db.zone_delete(req.environ['nova.context'], zone_id) return {} - def create(self, req, **kw): + def create(self, req): context = req.environ['nova.context'] env = self._deserialize(req.body, req) zone = db.zone_create(context, env["zone"]) return dict(zone=_scrub_zone(zone)) - def update(self, req, id, **kw): + def update(self, req, id): context = req.environ['nova.context'] env = self._deserialize(req.body, req) zone_id = int(id) From 75f7a73735957d5ddf04c7c9a23decf1a6fa7f9f Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Wed, 9 Mar 2011 14:55:36 -0600 Subject: [PATCH 28/76] Added naming scheme comment --- nova/virt/xenapi_conn.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index b63a5f8c3f42..bfe290be3690 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -49,6 +49,12 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. address for the nova-volume host :target_port: iSCSI Target Port, 3260 Default :iqn_prefix: IQN Prefix, e.g. 'iqn.2010-10.org.openstack' + +**Variable Naming Scheme** + +- suffix "_ref" for opaque references +- suffix "_uuid" for UUIDs +- suffix "_rec" for record objects """ import sys From fb4785b85c1bef4179140cfb85ce01eca9fb5da5 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Wed, 9 Mar 2011 21:46:27 +0000 Subject: [PATCH 29/76] fix the copyright notice in migration --- .../migrate_repo/versions/010_add_os_type_to_instances.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py index 514b92b813a4..eb3066894ecf 100644 --- a/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py +++ b/nova/db/sqlalchemy/migrate_repo/versions/010_add_os_type_to_instances.py @@ -1,8 +1,6 @@ # vim: tabstop=4 shiftwidth=4 softtabstop=4 -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. +# Copyright 2010 OpenStack LLC. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain From 21937b48fcac81fa108f37f307b1b2e969bb7b4f Mon Sep 17 00:00:00 2001 From: "Kevin L. 
Mitchell" Date: Thu, 10 Mar 2011 00:01:15 +0000 Subject: [PATCH 30/76] Replace session.execute() calls performing raw UPDATE statements with SQLAlchemy code, with the exception of fixed_ip_disassociate_all_by_timeout() --- nova/db/sqlalchemy/api.py | 97 +++++++++++++++++++++++++-------------- 1 file changed, 62 insertions(+), 35 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 5e498fc6f129..22c85106d51c 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -701,14 +701,18 @@ def instance_data_get_for_project(context, project_id): def instance_destroy(context, instance_id): session = get_session() with session.begin(): - session.execute('update instances set deleted=1,' - 'deleted_at=:at where id=:id', - {'id': instance_id, - 'at': datetime.datetime.utcnow()}) - session.execute('update security_group_instance_association ' - 'set deleted=1,deleted_at=:at where instance_id=:id', - {'id': instance_id, - 'at': datetime.datetime.utcnow()}) + session.query(models.Instance).\ + filter_by(id=instance_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': models.Instance.updated_at + 0}) + session.query(models.SecurityGroupInstanceAssociation).\ + filter_by(instance_id=instance_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': + (models.SecurityGroupInstanceAssociation. + updated_at + 0)}) @require_context @@ -950,9 +954,11 @@ def key_pair_destroy_all_by_user(context, user_id): authorize_user_context(context, user_id) session = get_session() with session.begin(): - # TODO(vish): do we have to use sql here? - session.execute('update key_pairs set deleted=1 where user_id=:id', - {'id': user_id}) + session.query(models.KeyPair).\ + filter_by(user_id=user_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': models.KeyPair.updated_at + 0}) @require_context @@ -1063,7 +1069,9 @@ def network_disassociate(context, network_id): @require_admin_context def network_disassociate_all(context): session = get_session() - session.execute('update networks set project_id=NULL') + session.query(models.Network).\ + update({'project_id': None, + 'updated_at': models.Network.updated_at + 0}) @require_context @@ -1433,15 +1441,17 @@ def volume_data_get_for_project(context, project_id): def volume_destroy(context, volume_id): session = get_session() with session.begin(): - # TODO(vish): do we have to use sql here? - session.execute('update volumes set deleted=1 where id=:id', - {'id': volume_id}) - session.execute('update export_devices set volume_id=NULL ' - 'where volume_id=:id', - {'id': volume_id}) - session.execute('update iscsi_targets set volume_id=NULL ' - 'where volume_id=:id', - {'id': volume_id}) + session.query(models.Volume).\ + filter_by(id=volume_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': models.Volume.updated_at + 0}) + session.query(models.ExportDevice).\ + filter_by(volume_id=volume_id).\ + update({'volume_id': None}) + session.query(models.IscsiTarget).\ + filter_by(volume_id=volume_id).\ + update({'volume_id': None}) @require_admin_context @@ -1661,17 +1671,26 @@ def security_group_create(context, values): def security_group_destroy(context, security_group_id): session = get_session() with session.begin(): - # TODO(vish): do we have to use sql here? 
- session.execute('update security_groups set deleted=1 where id=:id', - {'id': security_group_id}) - session.execute('update security_group_instance_association ' - 'set deleted=1,deleted_at=:at ' - 'where security_group_id=:id', - {'id': security_group_id, - 'at': datetime.datetime.utcnow()}) - session.execute('update security_group_rules set deleted=1 ' - 'where group_id=:id', - {'id': security_group_id}) + session.query(models.SecurityGroup).\ + filter_by(id=security_group_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': + models.SecurityGroup.updated_at + 0}) + session.query(models.SecurityGroupInstanceAssociation).\ + filter_by(security_group_id=security_group_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': + (models.SecurityGroupInstanceAssocation. + updated_at + 0)}) + session.query(models.SecurityGroupIngressRule).\ + filter_by(group_id=security_group_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': + (models.SecurityGroupIngressRule. + updated_at + 0)}) @require_context @@ -1679,9 +1698,17 @@ def security_group_destroy_all(context, session=None): if not session: session = get_session() with session.begin(): - # TODO(vish): do we have to use sql here? - session.execute('update security_groups set deleted=1') - session.execute('update security_group_rules set deleted=1') + session.query(models.SecurityGroup).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': + models.SecurityGroup.updated_at + 0}) + session.query(models.SecurityGroupIngressRule).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': + (models.SecurityGroupIngressRule. + updated_at + 0)}) ################### From f0bb6d9fc47b92d335c7d7fa238dfd43f0dbdf69 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 10 Mar 2011 13:30:52 +0900 Subject: [PATCH 31/76] fixed based on reviewer's comment. --- bin/nova-manage | 8 ++-- nova/compute/manager.py | 22 +++++------ nova/db/sqlalchemy/api.py | 4 +- nova/scheduler/driver.py | 10 ++--- nova/tests/test_compute.py | 24 ++++++------ nova/tests/test_scheduler.py | 5 ++- nova/tests/test_service.py | 75 ++++++++++++++---------------------- nova/tests/test_virt.py | 6 +-- nova/tests/test_volume.py | 7 ++-- nova/virt/libvirt_conn.py | 22 +++++++---- nova/volume/driver.py | 2 +- 11 files changed, 88 insertions(+), 97 deletions(-) diff --git a/bin/nova-manage b/bin/nova-manage index d782f60287cd..f9e4fa8dcf20 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -567,7 +567,7 @@ class VmCommands(object): if (FLAGS.volume_driver != 'nova.volume.driver.AOEDriver' and \ FLAGS.volume_driver != 'nova.volume.driver.ISCSIDriver'): msg = _("Support only AOEDriver and ISCSIDriver. 
Sorry!") - raise exception.Error(msg) + raise exception.Error(msg) rpc.call(ctxt, FLAGS.scheduler_topic, @@ -637,8 +637,8 @@ class ServiceCommands(object): "args": {"host": host}}) if type(result) != dict: - print 'Unexpected error occurs' - print '[Result]', result + print _('An unexpected error has occurred.') + print _('[Result]'), result else: cpu = result['resource']['vcpus'] mem = result['resource']['memory_mb'] @@ -667,7 +667,7 @@ class ServiceCommands(object): ctxt = context.get_admin_context() service_refs = db.service_get_all_by_host(ctxt, host) if len(service_refs) <= 0: - raise exception.Invalid(_('%s does not exists.') % host) + raise exception.Invalid(_('%s does not exist.') % host) service_refs = [s for s in service_refs if s['topic'] == 'compute'] if len(service_refs) <= 0: diff --git a/nova/compute/manager.py b/nova/compute/manager.py index 3675cc92ee3d..0cab10fc34dc 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -64,7 +64,7 @@ flags.DEFINE_integer('password_length', 12, flags.DEFINE_string('console_host', socket.gethostname(), 'Console proxy host to use to connect to instances on' 'this host.') -flags.DEFINE_string('live_migration_retry_count', 30, +flags.DEFINE_integer('live_migration_retry_count', 30, ("Retry count needed in live_migration." " sleep 1 sec for each count")) @@ -757,8 +757,9 @@ class ComputeManager(manager.Manager): dirpath = FLAGS.instances_path fd, tmp_file = tempfile.mkstemp(dir=dirpath) LOG.debug(_("Creating tmpfile %s to notify to other " - "compute node that they mounts same storage.") % tmp_file) - os.fdopen(fd, 'w+').close() + "compute nodes that they should mount " + "the same storage.") % tmp_file) + os.close(fd) return os.path.basename(tmp_file) @exception.wrap_exception @@ -812,7 +813,7 @@ class ComputeManager(manager.Manager): # Getting fixed ips fixed_ip = self.db.instance_get_fixed_address(context, instance_id) if not fixed_ip: - msg = _("%(instance_id)s(%(ec2_id)s) does'nt have fixed_ip") + msg = _("%(instance_id)s(%(ec2_id)s) does not have fixed_ip.") raise exception.NotFound(msg % locals()) # If any volume is mounted, prepare here. @@ -929,7 +930,7 @@ class ComputeManager(manager.Manager): floating_ip = self.db.instance_get_floating_address(ctxt, instance_id) if not floating_ip: - LOG.info(_('floating_ip is not found for %s'), i_name) + LOG.info(_('No floating_ip is found for %s.'), i_name) else: floating_ip_ref = self.db.floating_ip_get_by_address(ctxt, floating_ip) @@ -937,7 +938,7 @@ class ComputeManager(manager.Manager): floating_ip_ref['address'], {'host': dest}) except exception.NotFound: - LOG.info(_('Floating_ip is not found for %s'), i_name) + LOG.info(_('No floating_ip is found for %s.'), i_name) except: LOG.error(_("Live migration: Unexpected error:" "%s cannot inherit floating ip..") % i_name) @@ -945,12 +946,11 @@ class ComputeManager(manager.Manager): # Restore instance/volume state self.recover_live_migration(ctxt, instance_ref, dest) - LOG.info(_('Migrating %(i_name)s to %(dest)s finishes successfully.') + LOG.info(_('Migrating %(i_name)s to %(dest)s finished successfully.') % locals()) - LOG.info(_("The below error is normally occurs. 
" - "Just check if instance is successfully migrated.\n" - "libvir: QEMU error : Domain not found: no domain " - "with matching name..")) + LOG.info(_("You may see the error \"libvirt: QEMU error: " + "Domain not found: no domain with matching name.\" " + "This error can be safely ignored.")) def recover_live_migration(self, ctxt, instance_ref, host=None): """Recovers Instance/volume state from migrating -> running. diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 8ea5062aebd6..f44ca0fa39c2 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -192,8 +192,8 @@ def service_get_all_compute_by_host(context, host): all() if not result: - raise exception.NotFound(_("%s does not exist or not " - "compute node.") % host) + raise exception.NotFound(_("%s does not exist or is not " + "a compute node.") % host) return result diff --git a/nova/scheduler/driver.py b/nova/scheduler/driver.py index 791f9000d37a..ed3dfe1c0e6b 100644 --- a/nova/scheduler/driver.py +++ b/nova/scheduler/driver.py @@ -226,7 +226,6 @@ class Scheduler(object): "args": {'cpu_info': oservice_ref['cpu_info']}}) except rpc.RemoteError: - ec2_id = instance_ref['hostname'] src = instance_ref['host'] logging.exception(_("host %(dest)s is not compatible with " "original host %(src)s.") % locals()) @@ -259,9 +258,10 @@ class Scheduler(object): mem_avail = mem_total - mem_used mem_inst = instance_ref['memory_mb'] if mem_avail <= mem_inst: - raise exception.NotEmpty(_("%(ec2_id)s is not capable to " - "migrate %(dest)s (host:%(mem_avail)s " - " <= instance:%(mem_inst)s)") + raise exception.NotEmpty(_("Unable to migrate %(ec2_id)s " + "to destination: %(dest)s " + "(host:%(mem_avail)s " + "<= instance:%(mem_inst)s)") % locals()) def mounted_on_same_shared_storage(self, context, instance_ref, dest): @@ -292,7 +292,7 @@ class Scheduler(object): except rpc.RemoteError: ipath = FLAGS.instances_path - logging.error(_("Cannot comfirm tmpfile at %(ipath)s is on " + logging.error(_("Cannot confirm tmpfile at %(ipath)s is on " "same shared storage between %(src)s " "and %(dest)s.") % locals()) raise diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index 85c2c948bc67..71899ba9e1e1 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -89,14 +89,14 @@ class ComputeTestCase(test.TestCase): Use this when any testcase executed later than test_run_terminate """ vol1 = models.Volume() - vol1.__setitem__('id', 1) + vol1['id'] = 1 vol2 = models.Volume() - vol2.__setitem__('id', 2) + vol2['id'] = 2 instance_ref = models.Instance() - instance_ref.__setitem__('id', 1) - instance_ref.__setitem__('volumes', [vol1, vol2]) - instance_ref.__setitem__('hostname', 'i-00000001') - instance_ref.__setitem__('host', 'dummy') + instance_ref['id'] = 1 + instance_ref['volumes'] = [vol1, vol2] + instance_ref['hostname'] = 'i-00000001' + instance_ref['host'] = 'dummy' return instance_ref def test_create_instance_defaults_display_name(self): @@ -114,9 +114,9 @@ class ComputeTestCase(test.TestCase): """Make sure create associates security groups""" group = self._create_group() instance_ref = models.Instance() - instance_ref.__setitem__('id', 1) - instance_ref.__setitem__('volumes', [{'id': 1}, {'id': 2}]) - instance_ref.__setitem__('hostname', 'i-00000001') + instance_ref['id'] = 1 + instance_ref['volumes'] = [{'id': 1}, {'id': 2}] + instance_ref['hostname'] = 'i-00000001' return instance_ref def test_create_instance_defaults_display_name(self): @@ -390,7 +390,7 @@ class 
ComputeTestCase(test.TestCase): def test_pre_live_migration_instance_has_no_volume(self): """Confirm log meg when instance doesn't mount any volumes.""" i_ref = self._get_dummy_instance() - i_ref.__setitem__('volumes', []) + i_ref['volumes'] = [] c = context.get_admin_context() self._setup_other_managers() @@ -501,7 +501,7 @@ class ComputeTestCase(test.TestCase): def test_live_migration_dest_raises_exception_no_volume(self): """Same as above test(input pattern is different) """ i_ref = self._get_dummy_instance() - i_ref.__setitem__('volumes', []) + i_ref['volumes'] = [] c = context.get_admin_context() topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host']) @@ -526,7 +526,7 @@ class ComputeTestCase(test.TestCase): def test_live_migration_works_correctly_no_volume(self): """Confirm live_migration() works as expected correctly.""" i_ref = self._get_dummy_instance() - i_ref.__setitem__('volumes', []) + i_ref['volumes'] = [] c = context.get_admin_context() topic = db.queue_get_for(c, FLAGS.compute_topic, i_ref['host']) diff --git a/nova/tests/test_scheduler.py b/nova/tests/test_scheduler.py index 711b66af70d3..8ac02c5a468c 100644 --- a/nova/tests/test_scheduler.py +++ b/nova/tests/test_scheduler.py @@ -119,7 +119,8 @@ class SchedulerTestCase(test.TestCase): try: scheduler.show_host_resources(ctxt, dest) except exception.NotFound, e: - c1 = (0 <= e.message.find('does not exist or not compute node')) + c1 = (e.message.find(_("does not exist or is not a " + "compute node.")) >= 0) self.assertTrue(c1) def _dic_is_equal(self, dic1, dic2, keys=None): @@ -786,7 +787,7 @@ class SimpleDriverTestCase(test.TestCase): i_ref, 'somewhere') except exception.NotEmpty, e: - c = (e.message.find('is not capable to migrate') >= 0) + c = (e.message.find('Unable to migrate') >= 0) self.assertTrue(c) db.instance_destroy(self.context, instance_id) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index d17f6a22a54f..666c4a11d227 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -42,24 +42,6 @@ class FakeManager(manager.Manager): def test_method(self): return 'manager' -# temporary variable to store host/binary/self.mox -# from each method to fake class. 
-global_host = None -global_binary = None -global_mox = None - - -class FakeComputeManager(compute_manager.ComputeManager): - """Fake computemanager manager for tests""" - - def __init__(self, compute_driver=None, *args, **kwargs): - global ghost, gbinary, gmox - self.update_available_resource(mox.IgnoreArg()) - gmox.ReplayAll() - super(FakeComputeManager, self).__init__(compute_driver, - *args, - **kwargs) - class ExtendedService(service.Service): def test_method(self): @@ -275,37 +257,38 @@ class ServiceTestCase(test.TestCase): """Confirm compute updates their record of compute-service table.""" host = 'foo' binary = 'nova-compute' - topic = 'compute1' - service_create = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'availability_zone': 'nova'} - service_ref = {'host': host, - 'binary': binary, - 'topic': topic, - 'report_count': 0, - 'availability_zone': 'nova', - 'id': 1} - - service.db.service_get_by_args(mox.IgnoreArg(), - host, - binary).AndRaise(exception.NotFound()) - service.db.service_create(mox.IgnoreArg(), - service_create).AndReturn(service_ref) - self.mox.StubOutWithMock(compute_manager.ComputeManager, - 'update_available_resource') - - global ghost, gbinary, gmox - ghost = host - gbinary = binary - gmox = self.mox + topic = 'compute' + # Any mocks are not working without UnsetStubs() here. + self.mox.UnsetStubs() + ctxt = context.get_admin_context() + service_ref = db.service_create(ctxt, {'host': host, + 'binary': binary, + 'topic': topic}) serv = service.Service(host, binary, topic, - 'nova.tests.test_service.FakeComputeManager') - # ReplayAll has been executed FakeComputeManager.__init__() - #self.mox.ReplayAll() + 'nova.compute.manager.ComputeManager') + + # This testcase want to test calling update_available_resource. + # No need to call periodic call, then below variable must be set 0. + serv.report_interval = 0 + serv.periodic_interval = 0 + + # Creating mocks + self.mox.StubOutWithMock(service.rpc.Connection, 'instance') + service.rpc.Connection.instance(new=mox.IgnoreArg()) + service.rpc.Connection.instance(new=mox.IgnoreArg()) + self.mox.StubOutWithMock(serv.manager.driver, + 'update_available_resource') + serv.manager.driver.update_available_resource(mox.IgnoreArg(), host) + + # Just doing start()-stop(), not confirm new db record is created, + # because update_available_resource() works only in libvirt environment. + # This testcase confirms update_available_resource() is called. + # Otherwise, mox complains. + self.mox.ReplayAll() serv.start() serv.stop() + + db.service_destroy(ctxt, service_ref['id']) diff --git a/nova/tests/test_virt.py b/nova/tests/test_virt.py index 7ea8c0fb57d9..ee41ae7329ad 100644 --- a/nova/tests/test_virt.py +++ b/nova/tests/test_virt.py @@ -283,7 +283,7 @@ class LibvirtConnTestCase(test.TestCase): self.assertEquals(uri, testuri) db.instance_destroy(user_context, instance_ref['id']) - def tes1t_update_available_resource_works_correctly(self): + def test_update_available_resource_works_correctly(self): """Confirm compute_node table is updated successfully.""" org_path = FLAGS.instances_path = '' FLAGS.instances_path = '.' 
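Note: the ServiceTestCase rewrite in this patch leans on mox's record/replay/verify cycle. For readers unfamiliar with mox, here is a self-contained sketch of that protocol; the Driver class is made up for illustration, and the "mox complains" in the comment above refers to the VerifyAll() step failing when a recorded call never happens:

    import mox

    class Driver(object):
        def update_resource(self, host):
            pass

    m = mox.Mox()
    driver = Driver()
    m.StubOutWithMock(driver, 'update_resource')  # replace method with a mock
    driver.update_resource(mox.IgnoreArg())       # record the expected call
    m.ReplayAll()                                 # switch from record to replay
    driver.update_resource('foo')                 # the code under test calls it
    m.VerifyAll()                                 # raises if a recorded call
                                                  # never happened
    m.UnsetStubs()                                # restore the real method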
@@ -314,7 +314,7 @@ class LibvirtConnTestCase(test.TestCase): compute_node = service_ref['compute_node'][0] if sys.platform.upper() == 'LINUX2': - self.assertTrue(compute_node['vcpus'] > 0) + self.assertTrue(compute_node['vcpus'] >= 0) self.assertTrue(compute_node['memory_mb'] > 0) self.assertTrue(compute_node['local_gb'] > 0) self.assertTrue(compute_node['vcpus_used'] == 0) @@ -323,7 +323,7 @@ class LibvirtConnTestCase(test.TestCase): self.assertTrue(len(compute_node['hypervisor_type']) > 0) self.assertTrue(compute_node['hypervisor_version'] > 0) else: - self.assertTrue(compute_node['vcpus'] > 0) + self.assertTrue(compute_node['vcpus'] >= 0) self.assertTrue(compute_node['memory_mb'] == 0) self.assertTrue(compute_node['local_gb'] > 0) self.assertTrue(compute_node['vcpus_used'] == 0) diff --git a/nova/tests/test_volume.py b/nova/tests/test_volume.py index e8b4ceee8f48..d88e363da732 100644 --- a/nova/tests/test_volume.py +++ b/nova/tests/test_volume.py @@ -284,9 +284,10 @@ class AOETestCase(DriverTestCase): self.volume.check_for_export(self.context, self.instance_id) except exception.ProcessExecutionError, e: volume_id = volume_id_list[0] - msg = _("""Cannot confirm exported volume id:%(volume_id)s.""" - """vblade process for e%(shelf_id)s.%(blade_id)s """ - """isn't running.""") % locals() + msg = _("Cannot confirm exported volume id:%(volume_id)s. " + "vblade process for e%(shelf_id)s.%(blade_id)s " + "isn't running.") % locals() + msg_is_match = (0 <= e.message.find(msg)) self.assertTrue(msg_is_match) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 938719a7cfc2..43a9dc4e74ba 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -860,7 +860,14 @@ class LibvirtConnection(object): """ - return multiprocessing.cpu_count() + # On certain platforms, this will raise a NotImplementedError. + try: + return multiprocessing.cpu_count() + except NotImplementedError: + LOG.warn(_("Cannot get the number of cpu, because this " + "function is not implemented for this platform. " + "This error can be safely ignored for now.")) + return 0 def get_memory_mb_total(self): """Get the total memory size(MB) of physical computer. 
@@ -1042,9 +1049,9 @@ class LibvirtConnection(object): try: service_ref = db.service_get_all_compute_by_host(ctxt, host)[0] except exception.NotFound: - msg = _(("""Cannot update compute manager specific info,""" - """ Because no service record found.""")) - raise exception.Invalid(msg) + raise exception.Invalid(_("Cannot update compute manager " + "specific info, because no service " + "record was found.")) # Updating host information dic = {'vcpus': self.get_vcpu_total(), @@ -1059,11 +1066,11 @@ class LibvirtConnection(object): compute_node_ref = service_ref['compute_node'] if not compute_node_ref: - LOG.info(_('Compute_service record is created for %s ') % host) + LOG.info(_('Compute_service record created for %s ') % host) dic['service_id'] = service_ref['id'] db.compute_node_create(ctxt, dic) else: - LOG.info(_('Compute_service record is updated for %s ') % host) + LOG.info(_('Compute_service record updated for %s ') % host) db.compute_node_update(ctxt, compute_node_ref[0]['id'], dic) def compare_cpu(self, cpu_info): @@ -1081,8 +1088,7 @@ class LibvirtConnection(object): """ - LOG.info(_('Checking cpu_info: instance was launched this cpu.\n%s') - % cpu_info) + LOG.info(_('Instance launched has CPU info:\n%s') % cpu_info) dic = utils.loads(cpu_info) xml = str(Template(self.cpuinfo_xml, searchList=dic)) LOG.info(_('to xml...\n:%s ' % xml)) diff --git a/nova/volume/driver.py b/nova/volume/driver.py index a902da6acd83..31a6a02ee735 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -234,7 +234,7 @@ class AOEDriver(VolumeDriver): break if not exported: # Instance will be terminated in this case. - desc = _("Cannot confirm exported volume id:%(volume_id)s." + desc = _("Cannot confirm exported volume id:%(volume_id)s. " "vblade process for e%(shelf_id)s.%(blade_id)s " "isn't running.") % locals() raise exception.ProcessExecutionError(out, _err, cmd=cmd, From b75ab789194f1ced801b1d68ae8cc54051716414 Mon Sep 17 00:00:00 2001 From: Kei Masumoto Date: Thu, 10 Mar 2011 15:16:03 +0900 Subject: [PATCH 32/76] fix pep8 check --- nova/tests/test_service.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/nova/tests/test_service.py b/nova/tests/test_service.py index 666c4a11d227..393f9d20bcc0 100644 --- a/nova/tests/test_service.py +++ b/nova/tests/test_service.py @@ -284,9 +284,9 @@ class ServiceTestCase(test.TestCase): serv.manager.driver.update_available_resource(mox.IgnoreArg(), host) # Just doing start()-stop(), not confirm new db record is created, - # because update_available_resource() works only in libvirt environment. - # This testcase confirms update_available_resource() is called. - # Otherwise, mox complains. + # because update_available_resource() works only in + # libvirt environment. This testcase confirms + # update_available_resource() is called. Otherwise, mox complains. self.mox.ReplayAll() serv.start() serv.stop() From e76aad24ce8a9b1b7de1b2f874c22c9995f3071f Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 10 Mar 2011 14:30:17 +0100 Subject: [PATCH 33/76] Only include ramdisk and kernel id if they are actually set. 
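Instances backed by whole-disk images carry no kernel or ramdisk, so translating both ids unconditionally can break the metadata handler for them. The guard added below emits each metadata key only when the corresponding id is present. A standalone illustration of the same logic (dummy data; image_ec2_id is a stand-in for the real self._image_ec2_id() helper):

    # Dummy instance data: a kernel id is set, no ramdisk id.
    instance_ref = {'kernel_id': 'aki-1234'}
    data = {'meta-data': {}}

    def image_ec2_id(image_id, image_type):
        return image_id    # stand-in for the real helper

    # Only emit a key when the instance actually carries that id:
    for image_type in ['kernel', 'ramdisk']:
        key = '%s_id' % image_type
        if key in instance_ref:
            data['meta-data']['%s-id' % image_type] = image_ec2_id(
                instance_ref[key], image_type)

    assert 'kernel-id' in data['meta-data']
    assert 'ramdisk-id' not in data['meta-data']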
--- nova/api/ec2/cloud.py | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index b1917e9eadc5..1d2254225263 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -147,8 +147,6 @@ class CloudController(object): instance_ref['id']) ec2_id = ec2utils.id_to_ec2_id(instance_ref['id']) image_ec2_id = self._image_ec2_id(instance_ref['image_id'], 'machine') - k_ec2_id = self._image_ec2_id(instance_ref['kernel_id'], 'kernel') - r_ec2_id = self._image_ec2_id(instance_ref['ramdisk_id'], 'ramdisk') data = { 'user-data': base64.b64decode(instance_ref['user_data']), 'meta-data': { @@ -167,8 +165,6 @@ class CloudController(object): 'instance-type': instance_ref['instance_type'], 'local-hostname': hostname, 'local-ipv4': address, - 'kernel-id': k_ec2_id, - 'ramdisk-id': r_ec2_id, 'placement': {'availability-zone': availability_zone}, 'public-hostname': hostname, 'public-ipv4': floating_ip or '', @@ -176,6 +172,13 @@ class CloudController(object): 'reservation-id': instance_ref['reservation_id'], 'security-groups': '', 'mpi': mpi}} + + for image_type in ['kernel', 'ramdisk']: + if '%s_id' % image_type in instance_ref: + ec2_id = self._image_ec2_id(instance_ref['%s_id' % image_type], + image_type) + data['meta-data']['%s-id' % image_type] = ec2_id + if False: # TODO(vish): store ancestor ids data['ancestor-ami-ids'] = [] if False: # TODO(vish): store product codes From 801212a0ff04ddc33719d17b8c8ca847db5b1228 Mon Sep 17 00:00:00 2001 From: Cory Wright Date: Thu, 10 Mar 2011 15:47:55 +0000 Subject: [PATCH 34/76] Use a FLAGS.default_os_type if available --- nova/virt/xenapi/vm_utils.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index a1b85284f383..8dd2461780b0 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -41,9 +41,11 @@ from nova.virt.xenapi import HelperBase from nova.virt.xenapi.volume_utils import StorageError -FLAGS = flags.FLAGS LOG = logging.getLogger("nova.virt.xenapi.vm_utils") +FLAGS = flags.FLAGS +flags.DEFINE_string('default_os_type', 'linux', 'Default OS type') + XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, 'Running': power_state.RUNNING, @@ -347,7 +349,7 @@ class VMHelper(HelperBase): logging.debug(_("Asking xapi to upload %(vdi_uuids)s as" " ID %(image_id)s") % locals()) - os_type = instance.os_type and instance.os_type or 'linux' + os_type = instance.os_type or FLAGS.default_os_type params = {'vdi_uuids': vdi_uuids, 'image_id': image_id, From b361153a160ba1d61ed1d52de419cd27a8b4feda Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Thu, 10 Mar 2011 16:42:13 +0000 Subject: [PATCH 35/76] Correct a misspelling --- nova/db/sqlalchemy/api.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 22c85106d51c..2b60ab7d5eff 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -1682,7 +1682,7 @@ def security_group_destroy(context, security_group_id): update({'deleted': 1, 'deleted_at': datetime.datetime.utcnow(), 'updated_at': - (models.SecurityGroupInstanceAssocation. + (models.SecurityGroupInstanceAssociation. 
updated_at + 0)}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ From 4ead485ab69ee1e92635857ba73133a9e1d3bbcb Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Thu, 10 Mar 2011 12:06:09 -0600 Subject: [PATCH 36/76] Cleaned up vmops --- nova/virt/xenapi/vm_utils.py | 26 ++++----- nova/virt/xenapi/vmops.py | 104 ++++++++++++++++------------------- 2 files changed, 61 insertions(+), 69 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index ce081a2d6ac5..4ad820bcd27e 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -90,7 +90,7 @@ class VMHelper(HelperBase): get_instance_type(instance.instance_type) mem = str(long(instance_type['memory_mb']) * 1024 * 1024) vcpus = str(instance_type['vcpus']) - rec = { + vm_rec = { 'name_label': instance.name, 'name_description': '', 'is_a_template': False, @@ -122,23 +122,23 @@ class VMHelper(HelperBase): #Complete VM configuration record according to the image type #non-raw/raw with PV kernel/raw in HVM mode if instance.kernel_id: - rec['PV_bootloader'] = '' - rec['PV_kernel'] = kernel - rec['PV_ramdisk'] = ramdisk - rec['PV_args'] = 'root=/dev/xvda1' - rec['PV_bootloader_args'] = '' - rec['PV_legacy_args'] = '' + vm_rec['PV_bootloader'] = '' + vm_rec['PV_kernel'] = kernel + vm_rec['PV_ramdisk'] = ramdisk + vm_rec['PV_args'] = 'root=/dev/xvda1' + vm_rec['PV_bootloader_args'] = '' + vm_rec['PV_legacy_args'] = '' else: if pv_kernel: - rec['PV_args'] = 'noninteractive' - rec['PV_bootloader'] = 'pygrub' + vm_rec['PV_args'] = 'noninteractive' + vm_rec['PV_bootloader'] = 'pygrub' else: - rec['HVM_boot_policy'] = 'BIOS order' - rec['HVM_boot_params'] = {'order': 'dc'} - rec['platform'] = {'acpi': 'true', 'apic': 'true', + vm_rec['HVM_boot_policy'] = 'BIOS order' + vm_rec['HVM_boot_params'] = {'order': 'dc'} + vm_rec['platform'] = {'acpi': 'true', 'apic': 'true', 'pae': 'true', 'viridian': 'true'} LOG.debug(_('Created VM %s...'), instance.name) - vm_ref = session.call_xenapi('VM.create', rec) + vm_ref = session.call_xenapi('VM.create', vm_rec) instance_name = instance.name LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals()) return vm_ref diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 562ecd4d5562..5375df5b4f66 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -56,10 +56,10 @@ class VMOps(object): def list_instances(self): """List VM instances""" vms = [] - for vm in self._session.get_xenapi().VM.get_all(): - rec = self._session.get_xenapi().VM.get_record(vm) - if not rec["is_a_template"] and not rec["is_control_domain"]: - vms.append(rec["name_label"]) + for vm_ref in self._session.get_xenapi().VM.get_all(): + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]: + vms.append(vm_rec["name_label"]) return vms def _start(self, instance, vm_ref=None): @@ -371,8 +371,8 @@ class VMOps(object): def reboot(self, instance): """Reboot VM instance""" - vm = self._get_vm_opaque_ref(instance) - task = self._session.call_xenapi('Async.VM.clean_reboot', vm) + vm_ref = self._get_vm_opaque_ref(instance) + task = self._session.call_xenapi('Async.VM.clean_reboot', vm_ref) self._session.wait_for_task(task, instance.id) def set_admin_password(self, instance, new_pass): @@ -571,26 +571,27 @@ class VMOps(object): def pause(self, instance, callback): """Pause VM instance""" - vm = self._get_vm_opaque_ref(instance) - task = 
self._session.call_xenapi('Async.VM.pause', vm) + vm_ref = self._get_vm_opaque_ref(instance) + task = self._session.call_xenapi('Async.VM.pause', vm_ref) self._wait_with_callback(instance.id, task, callback) def unpause(self, instance, callback): """Unpause VM instance""" - vm = self._get_vm_opaque_ref(instance) - task = self._session.call_xenapi('Async.VM.unpause', vm) + vm_ref = self._get_vm_opaque_ref(instance) + task = self._session.call_xenapi('Async.VM.unpause', vm_ref) self._wait_with_callback(instance.id, task, callback) def suspend(self, instance, callback): """suspend the specified instance""" - vm = self._get_vm_opaque_ref(instance) - task = self._session.call_xenapi('Async.VM.suspend', vm) + vm_ref = self._get_vm_opaque_ref(instance) + task = self._session.call_xenapi('Async.VM.suspend', vm_ref) self._wait_with_callback(instance.id, task, callback) def resume(self, instance, callback): """resume the specified instance""" - vm = self._get_vm_opaque_ref(instance) - task = self._session.call_xenapi('Async.VM.resume', vm, False, True) + vm_ref = self._get_vm_opaque_ref(instance) + task = self._session.call_xenapi('Async.VM.resume', vm_ref, False, + True) self._wait_with_callback(instance.id, task, callback) def rescue(self, instance, callback): @@ -605,22 +606,18 @@ class VMOps(object): raise RuntimeError(_( "Instance is already in Rescue Mode: %s" % instance.name)) - vm = self._get_vm_opaque_ref(instance) - self._shutdown(instance, vm) - self._acquire_bootlock(vm) + vm_ref = self._get_vm_opaque_ref(instance) + self._shutdown(instance, vm_ref) + self._acquire_bootlock(vm_ref) instance._rescue = True self.spawn(instance) - rescue_vm = self._get_vm_opaque_ref(instance) + rescue_vm_ref = self._get_vm_opaque_ref(instance) - vbd = self._session.get_xenapi().VM.get_VBDs(vm)[0] + vbd = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"] - vbd_ref = VMHelper.create_vbd( - self._session, - rescue_vm, - vdi_ref, - 1, - False) + vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, vdi_ref, + 1, False) self._session.call_xenapi("Async.VBD.plug", vbd_ref) @@ -637,7 +634,7 @@ class VMOps(object): raise exception.NotFound(_( "Instance is not in Rescue Mode: %s" % instance.name)) - original_vm = self._get_vm_opaque_ref(instance) + original_vm_ref = self._get_vm_opaque_ref(instance) vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm) instance._rescue = False @@ -662,20 +659,20 @@ class VMOps(object): task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm) self._session.wait_for_task(task2, instance.id) - self._release_bootlock(original_vm) - self._start(instance, original_vm) + self._release_bootlock(original_vm_ref) + self._start(instance, original_vm_ref) def get_info(self, instance): """Return data about VM instance""" - vm = self._get_vm_opaque_ref(instance) - rec = self._session.get_xenapi().VM.get_record(vm) - return VMHelper.compile_info(rec) + vm_ref = self._get_vm_opaque_ref(instance) + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + return VMHelper.compile_info(vm_rec) def get_diagnostics(self, instance): """Return data about VM diagnostics""" - vm = self._get_vm_opaque_ref(instance) - rec = self._session.get_xenapi().VM.get_record(vm) - return VMHelper.compile_diagnostics(self._session, rec) + vm_ref = self._get_vm_opaque_ref(instance) + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + return VMHelper.compile_diagnostics(self._session, vm_rec) def get_console_output(self, instance): 
"""Return snapshot of console""" @@ -698,9 +695,9 @@ class VMOps(object): # at this stage even though they aren't implemented because these will # be needed for multi-nic and there was no sense writing it for single # network/single IP and then having to turn around and re-write it - vm_opaque_ref = self._get_vm_opaque_ref(instance.id) + vm_ref = self._get_vm_opaque_ref(instance.id) logging.debug(_("injecting network info to xenstore for vm: |%s|"), - vm_opaque_ref) + vm_ref) admin_context = context.get_admin_context() IPs = db.fixed_ip_get_all_by_instance(admin_context, instance['id']) networks = db.network_get_all_by_instance(admin_context, @@ -731,11 +728,10 @@ class VMOps(object): 'ips': [ip_dict(ip) for ip in network_IPs], 'ip6s': [ip6_dict(ip) for ip in network_IPs]} - self.write_to_param_xenstore(vm_opaque_ref, {location: mapping}) + self.write_to_param_xenstore(vm_ref, {location: mapping}) try: - self.write_to_xenstore(vm_opaque_ref, location, - mapping['location']) + self.write_to_xenstore(vm_ref, location, mapping['location']) except KeyError: # catch KeyError for domid if instance isn't running pass @@ -747,8 +743,8 @@ class VMOps(object): Creates vifs for an instance """ - vm_opaque_ref = self._get_vm_opaque_ref(instance.id) - logging.debug(_("creating vif(s) for vm: |%s|"), vm_opaque_ref) + vm_ref = self._get_vm_opaque_ref(instance.id) + logging.debug(_("creating vif(s) for vm: |%s|"), vm_ref) if networks is None: networks = db.network_get_all_by_instance(admin_context, instance['id']) @@ -768,12 +764,8 @@ class VMOps(object): except AttributeError: device = "0" - VMHelper.create_vif( - self._session, - vm_opaque_ref, - network_ref, - instance.mac_address, - device) + VMHelper.create_vif(self._session, vm_ref, network_ref, + instance.mac_address, device) def reset_network(self, instance): """ @@ -837,9 +829,9 @@ class VMOps(object): Any errors raised by the plugin will in turn raise a RuntimeError here. """ instance_id = vm.id - vm = self._get_vm_opaque_ref(vm) - rec = self._session.get_xenapi().VM.get_record(vm) - args = {'dom_id': rec['domid'], 'path': path} + vm_ref = self._get_vm_opaque_ref(vm) + vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) + args = {'dom_id': vm_rec['domid'], 'path': path} args.update(addl_args) try: task = self._session.async_call_plugin(plugin, method, args) @@ -919,9 +911,9 @@ class VMOps(object): value for 'keys' is passed, the returned dict is filtered to only return the values for those keys. """ - vm = self._get_vm_opaque_ref(instance_or_vm) + vm_ref = self._get_vm_opaque_ref(instance_or_vm) data = self._session.call_xenapi_request('VM.get_xenstore_data', - (vm, )) + (vm_ref, )) ret = {} if keys is None: keys = data.keys() @@ -939,11 +931,11 @@ class VMOps(object): """Takes a key/value pair and adds it to the xenstore parameter record for the given vm instance. If the key exists in xenstore, it is overwritten""" - vm = self._get_vm_opaque_ref(instance_or_vm) + vm_ref = self._get_vm_opaque_ref(instance_or_vm) self.remove_from_param_xenstore(instance_or_vm, key) jsonval = json.dumps(val) self._session.call_xenapi_request('VM.add_to_xenstore_data', - (vm, key, jsonval)) + (vm_ref, key, jsonval)) def write_to_param_xenstore(self, instance_or_vm, mapping): """Takes a dict and writes each key/value pair to the xenstore @@ -958,14 +950,14 @@ class VMOps(object): them from the xenstore parameter record data for the given VM. If the key doesn't exist, the request is ignored. 
""" - vm = self._get_vm_opaque_ref(instance_or_vm) + vm_ref = self._get_vm_opaque_ref(instance_or_vm) if isinstance(key_or_keys, basestring): keys = [key_or_keys] else: keys = key_or_keys for key in keys: self._session.call_xenapi_request('VM.remove_from_xenstore_data', - (vm, key)) + (vm_ref, key)) def clear_param_xenstore(self, instance_or_vm): """Removes all data from the xenstore parameter record for this VM.""" From 03e5b8f7c4e1afc6637774acb3d28100035cd323 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Thu, 10 Mar 2011 20:04:21 +0000 Subject: [PATCH 37/76] Partial revert of one conversion due to phantom magic exception from SQLAlchemy in unrelated code; convert all deletes --- nova/db/sqlalchemy/api.py | 65 ++++++++++++++++++++++++--------------- 1 file changed, 41 insertions(+), 24 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 2b60ab7d5eff..31adb33eea86 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -701,11 +701,21 @@ def instance_data_get_for_project(context, project_id): def instance_destroy(context, instance_id): session = get_session() with session.begin(): - session.query(models.Instance).\ - filter_by(id=instance_id).\ - update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': models.Instance.updated_at + 0}) + session.execute('update instances set deleted=1,' + 'deleted_at=:at where id=:id', + {'id': instance_id, + 'at': datetime.datetime.utcnow()}) + # NOTE(klmitch): for some reason, using the SQLAlchemy code + # here instead of the direct SQL update above causes the + # test_run_terminate_timestamps test (and only that one) to + # fail with an obscure TypeError exception from deep within + # SQLAlchemy; the nearest nova function in the traceback is + # instance_get() + # session.query(models.Instance).\ + # filter_by(id=instance_id).\ + # update({'deleted': 1, + # 'deleted_at': datetime.datetime.utcnow(), + # 'updated_at': models.Instance.updated_at + 0}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ update({'deleted': 1, @@ -1837,12 +1847,15 @@ def user_create(_context, values): def user_delete(context, id): session = get_session() with session.begin(): - session.execute('delete from user_project_association ' - 'where user_id=:id', {'id': id}) - session.execute('delete from user_role_association ' - 'where user_id=:id', {'id': id}) - session.execute('delete from user_project_role_association ' - 'where user_id=:id', {'id': id}) + session.query(models.UserProjectAssociation).\ + filter_by(user_id=id).\ + delete() + session.query(models.UserRoleAssociation).\ + filter_by(user_id=id).\ + delete() + session.query(models.UserProjectRoleAssociation).\ + filter_by(user_id=id).\ + delete() user_ref = user_get(context, id, session=session) session.delete(user_ref) @@ -1933,10 +1946,12 @@ def project_update(context, project_id, values): def project_delete(context, id): session = get_session() with session.begin(): - session.execute('delete from user_project_association ' - 'where project_id=:id', {'id': id}) - session.execute('delete from user_project_role_association ' - 'where project_id=:id', {'id': id}) + session.query(models.UserProjectAssociation).\ + filter_by(project_id=id).\ + delete() + session.query(models.UserProjectRoleAssociation).\ + filter_by(project_id=id).\ + delete() project_ref = project_get(context, id, session=session) session.delete(project_ref) @@ -1961,11 +1976,11 @@ def user_get_roles_for_project(context, 
user_id, project_id): def user_remove_project_role(context, user_id, project_id, role): session = get_session() with session.begin(): - session.execute('delete from user_project_role_association where ' - 'user_id=:user_id and project_id=:project_id and ' - 'role=:role', {'user_id': user_id, - 'project_id': project_id, - 'role': role}) + session.query(models.UserProjectRoleAssociation).\ + filter_by(user_id=user_id).\ + filter_by(project_id=project_id).\ + filter_by(role=role).\ + delete() def user_remove_role(context, user_id, role): @@ -2116,8 +2131,9 @@ def console_delete(context, console_id): session = get_session() with session.begin(): # consoles are meant to be transient. (mdragon) - session.execute('delete from consoles ' - 'where id=:id', {'id': console_id}) + session.query(models.Console).\ + filter_by(id=console_id).\ + delete() def console_get_by_pool_instance(context, pool_id, instance_id): @@ -2273,8 +2289,9 @@ def zone_update(context, zone_id, values): def zone_delete(context, zone_id): session = get_session() with session.begin(): - session.execute('delete from zones ' - 'where id=:id', {'id': zone_id}) + session.query(models.Zone).\ + filter_by(id=zone_id).\ + delete() @require_admin_context From c177074649055f1da2ca97eb3c07139571d4a664 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 10 Mar 2011 12:10:49 -0800 Subject: [PATCH 38/76] Implements basic OpenStack API client, ready to support API tests --- nova/tests/integrated/__init__.py | 20 +++ nova/tests/integrated/api/__init__.py | 20 +++ nova/tests/integrated/api/client.py | 224 ++++++++++++++++++++++++++ 3 files changed, 264 insertions(+) create mode 100644 nova/tests/integrated/__init__.py create mode 100644 nova/tests/integrated/api/__init__.py create mode 100644 nova/tests/integrated/api/client.py diff --git a/nova/tests/integrated/__init__.py b/nova/tests/integrated/__init__.py new file mode 100644 index 000000000000..10e0a91d77c8 --- /dev/null +++ b/nova/tests/integrated/__init__.py @@ -0,0 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`integrated` -- Tests whole systems, using mock services where needed +================================= +""" diff --git a/nova/tests/integrated/api/__init__.py b/nova/tests/integrated/api/__init__.py new file mode 100644 index 000000000000..5798ab3d1e6d --- /dev/null +++ b/nova/tests/integrated/api/__init__.py @@ -0,0 +1,20 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +:mod:`api` -- OpenStack API client, for testing rather than production +================================= +""" diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py new file mode 100644 index 000000000000..5ab247fab0a3 --- /dev/null +++ b/nova/tests/integrated/api/client.py @@ -0,0 +1,224 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2011 Justin Santa Barbara +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import httplib +import urlparse + +from nova import log as logging + + +LOG = logging.getLogger('nova.tests.api') + + +class OpenstackApiException(Exception): + def __init__(self, message=None, response=None): + self.response = response + if not message: + message = 'Unspecified error' + + if response: + _status = response.status + _body = response.read() + + message = _('%(message)s\nStatus Code: %(_status)s\n' + 'Body: %(_body)s') % locals() + + super(OpenstackApiException, self).__init__(message) + + +class OpenstackApiAuthenticationException(OpenstackApiException): + def __init__(self, response=None, message=None): + if not message: + message = _("Authentication error") + super(OpenstackApiAuthenticationException, self).__init__(message, + response) + + +class OpenstackApiNotFoundException(OpenstackApiException): + def __init__(self, response=None, message=None): + if not message: + message = _("Item not found") + super(OpenstackApiNotFoundException, self).__init__(message, response) + + +class TestOpenStackClient(object): + """ A really basic OpenStack API client that is under our control, + so we can make changes / insert hooks for testing""" + + def __init__(self, auth_user, auth_key, auth_uri): + super(TestOpenStackClient, self).__init__() + self.auth_result = None + self.auth_user = auth_user + self.auth_key = auth_key + self.auth_uri = auth_uri + + def request(self, url, method='GET', body=None, headers=None): + if headers is None: + headers = {} + + parsed_url = urlparse.urlparse(url) + port = parsed_url.port + hostname = parsed_url.hostname + scheme = parsed_url.scheme + + if scheme == 'http': + conn = httplib.HTTPConnection(hostname, + port=port) + elif scheme == 'https': + conn = httplib.HTTPSConnection(hostname, + port=port) + else: + raise OpenstackApiException("Unknown scheme: %s" % url) + + relative_url = parsed_url.path + if parsed_url.query: + relative_url = relative_url + parsed_url.query + LOG.info(_("Doing %(method)s on %(relative_url)s") % locals()) + if body: + LOG.info(_("Body: %s") % body) + + conn.request(method, relative_url, body, headers) + response = conn.getresponse() + return response + + def _authenticate(self): + if self.auth_result: + return self.auth_result + + headers = {'X-Auth-User': self.auth_user, + 'X-Auth-Key': self.auth_key} + response = self.request(self.auth_uri, + headers=headers) + if not response.status in [204]: + raise 
OpenstackApiAuthenticationException(response=response) + + auth_headers = {} + for k, v in response.getheaders(): + auth_headers[k] = v + + self.auth_result = auth_headers + return self.auth_result + + def api_request(self, relative_uri, check_response_status=None, **kwargs): + auth_result = self._authenticate() + + base_uri = auth_result['X-Server-Management-Url'] + full_uri = base_uri + relative_uri + + headers = kwargs.setdefault('headers', {}) + headers['X-Auth-Token'] = auth_result['X-Auth-Token'] + + LOG.debug(_("HTTP request on %s") % (relative_uri)) + + response = self.request(full_uri, **kwargs) + + LOG.debug(_("Response => code %s") % (response.status)) + + if check_response_status: + if not response.status in check_response_status: + if response.status == 404: + raise OpenstackApiNotFoundException(response=response) + else: + raise OpenstackApiException( + message=_("Unexpected status code"), + response=response) + + return response + + def _decode_json(self, response): + body = response.read() + LOG.debug(_("Decoding JSON: %s") % (body)) + return json.loads(body) + + def api_get(self, relative_uri, **kwargs): + kwargs.setdefault('check_response_status', [200]) + response = self.api_request(relative_uri, **kwargs) + return self._decode_json(response) + + def api_post(self, relative_uri, body, **kwargs): + kwargs['method'] = 'POST' + if body: + headers = kwargs.setdefault('headers', {}) + headers['Content-Type'] = 'application/json' + kwargs['body'] = json.dumps(body) + + kwargs.setdefault('check_response_status', [200]) + response = self.api_request(relative_uri, **kwargs) + return self._decode_json(response) + + def api_delete(self, relative_uri, **kwargs): + kwargs['method'] = 'DELETE' + kwargs.setdefault('check_response_status', [200, 202]) + return self.api_request(relative_uri, **kwargs) + + def get_keys_detail(self): + return self.api_get('/keys/detail')['keys'] + + def post_key(self, key): + return self.api_post('/keys', key)['key'] + + def delete_key(self, key_id): + return self.api_delete('/keys/%s' % key_id) + + def get_volume(self, volume_id): + return self.api_get('/volumes/%s' % volume_id)['volume'] + + def get_volumes_detail(self): + return self.api_get('/volumes/detail')['volumes'] + + def post_volume(self, volume): + return self.api_post('/volumes', volume)['volume'] + + def delete_volume(self, volume_id): + return self.api_delete('/volumes/%s' % volume_id) + + def get_server(self, server_id): + return self.api_get('/servers/%s' % server_id)['server'] + + def get_servers(self, detail=True): + rel_url = '/servers/detail' if detail else '/servers' + return self.api_get(rel_url)['servers'] + + def post_server(self, server): + return self.api_post('/servers', server)['server'] + + def delete_server(self, server_id): + return self.api_delete('/servers/%s' % server_id) + + def get_image(self, image_id): + return self.api_get('/images/%s' % image_id)['image'] + + def get_images_detail(self): + return self.api_get('/images/detail')['images'] + + def post_image(self, image): + return self.api_post('/images', image)['image'] + + def delete_image(self, image_id): + return self.api_delete('/images/%s' % image_id) + + def get_flavor(self, flavor_id): + return self.api_get('/flavors/%s' % flavor_id)['flavor'] + + def get_flavors_detail(self): + return self.api_get('/flavors/detail')['flavors'] + + def post_flavor(self, flavor): + return self.api_post('/flavors', flavor)['flavor'] + + def delete_flavor(self, flavor_id): + return self.api_delete('/flavors/%s' % flavor_id) From 
8aabc32a69bf47075a3fd8e677d1bd70cbbca339 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Thu, 10 Mar 2011 21:13:07 +0100 Subject: [PATCH 39/76] Add basic test case. --- nova/tests/test_cloud.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index cf8ee7eff723..db7c15aeb5b1 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -353,6 +353,18 @@ class CloudTestCase(test.TestCase): self.assertEqual('', img.metadata['description']) shutil.rmtree(pathdir) + def test_metadata_works_without_kernel_and_ramdisk(self): + inst = db.instance_create(self.context, {'host': self.compute.host, + 'vcpus': 2, + 'image_id': '123456', + 'user_data': '' }) + fixed = self.network.allocate_fixed_ip(self.context, inst['id']) + try: + self.cloud.get_metadata(fixed) + finally: + self.network.deallocate_fixed_ip(self.context, fixed) + db.instance_destroy(self.context, inst['id']) + def test_update_of_instance_display_fields(self): inst = db.instance_create(self.context, {}) ec2_id = ec2utils.id_to_ec2_id(inst['id']) From bd06f0ac0d0d3e3c9d7b296c5fe4bb8a0dd44c89 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Thu, 10 Mar 2011 20:36:36 +0000 Subject: [PATCH 40/76] Last un-magiced session.execute() replaced with SQLAlchemy code... --- nova/db/sqlalchemy/api.py | 21 +++++++++++---------- 1 file changed, 11 insertions(+), 10 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 31adb33eea86..88125aaf5c01 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -579,16 +579,17 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): session = get_session() # NOTE(vish): The nested select is because sqlite doesn't support # JOINs in UPDATEs. 
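For readers less used to the query-builder form, this hunk rewrites a hand-written UPDATE containing a nested SELECT into SQLAlchemy calls. A minimal standalone sketch of the same pattern, on a toy schema (not nova's) with an in-memory SQLite database and a SQLAlchemy of the same vintage:

    from sqlalchemy import Column, Integer, create_engine
    from sqlalchemy.ext.declarative import declarative_base
    from sqlalchemy.orm import sessionmaker

    Base = declarative_base()

    class Network(Base):
        __tablename__ = 'networks'
        id = Column(Integer, primary_key=True)
        host = Column(Integer)

    class FixedIp(Base):
        __tablename__ = 'fixed_ips'
        id = Column(Integer, primary_key=True)
        network_id = Column(Integer)
        instance_id = Column(Integer)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    session = sessionmaker(bind=engine)()

    # The nested SELECT stands in for the JOIN sqlite cannot express
    # inside an UPDATE.
    inner_q = session.query(Network.id).filter_by(host=1).subquery()

    # Query.update() returns the matched-row count, which is what the
    # old result.rowcount reported; the in-Python session synchronizer
    # cannot evaluate the subquery, hence synchronize_session=False.
    count = session.query(FixedIp).\
        filter(FixedIp.network_id.in_(inner_q)).\
        update({'instance_id': None}, synchronize_session=False)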
- result = session.execute('UPDATE fixed_ips SET instance_id = NULL, ' - 'leased = 0 ' - 'WHERE network_id IN (SELECT id FROM networks ' - 'WHERE host = :host) ' - 'AND updated_at < :time ' - 'AND instance_id IS NOT NULL ' - 'AND allocated = 0', - {'host': host, - 'time': time}) - return result.rowcount + inner_q = session.query(models.Network.id).\ + filter_by(host=host).\ + subquery() + result = session.query(models.FixedIp).\ + filter(models.FixedIp.network_id.in_(inner_q)).\ + filter(models.FixedIp.updated_at < time).\ + filter(models.FixedIp.instance_id != None).\ + filter_by(allocated=0).\ + update({'instance_id': None, + 'leased': 0}) + return result @require_admin_context From 6b4beef8093b4a7b4d42818567b5afb023af9251 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 10 Mar 2011 12:55:06 -0800 Subject: [PATCH 41/76] Don't wrap keys and volumes till they're in the API --- nova/tests/integrated/api/client.py | 21 --------------------- 1 file changed, 21 deletions(-) diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index 5ab247fab0a3..8ec46b5aee43 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -165,27 +165,6 @@ class TestOpenStackClient(object): kwargs.setdefault('check_response_status', [200, 202]) return self.api_request(relative_uri, **kwargs) - def get_keys_detail(self): - return self.api_get('/keys/detail')['keys'] - - def post_key(self, key): - return self.api_post('/keys', key)['key'] - - def delete_key(self, key_id): - return self.api_delete('/keys/%s' % key_id) - - def get_volume(self, volume_id): - return self.api_get('/volumes/%s' % volume_id)['volume'] - - def get_volumes_detail(self): - return self.api_get('/volumes/detail')['volumes'] - - def post_volume(self, volume): - return self.api_post('/volumes', volume)['volume'] - - def delete_volume(self, volume_id): - return self.api_delete('/volumes/%s' % volume_id) - def get_server(self, server_id): return self.api_get('/servers/%s' % server_id)['server'] From f81d925f86670e3ed32d815c219824f627d82bc2 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 10 Mar 2011 13:51:26 -0800 Subject: [PATCH 42/76] Better logging, be more careful about when we throw login errors re bug732866 --- nova/tests/integrated/api/client.py | 37 ++++++++++++++++++----------- 1 file changed, 23 insertions(+), 14 deletions(-) diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index 8ec46b5aee43..d424a6428299 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -99,11 +99,18 @@ class TestOpenStackClient(object): if self.auth_result: return self.auth_result + auth_uri = self.auth_uri headers = {'X-Auth-User': self.auth_user, 'X-Auth-Key': self.auth_key} - response = self.request(self.auth_uri, + response = self.request(auth_uri, headers=headers) - if not response.status in [204]: + + http_status = response.status + LOG.debug(_("%(auth_uri)s => code %(http_status)s") % locals()) + + # Until bug732866 is fixed, we can't check this properly... 
+ #if http_status == 401: + if http_status != 204: raise OpenstackApiAuthenticationException(response=response) auth_headers = {} @@ -116,21 +123,21 @@ class TestOpenStackClient(object): def api_request(self, relative_uri, check_response_status=None, **kwargs): auth_result = self._authenticate() - base_uri = auth_result['X-Server-Management-Url'] + #NOTE(justinsb): httplib 'helpfully' converts headers to lower case + base_uri = auth_result['x-server-management-url'] full_uri = base_uri + relative_uri headers = kwargs.setdefault('headers', {}) - headers['X-Auth-Token'] = auth_result['X-Auth-Token'] - - LOG.debug(_("HTTP request on %s") % (relative_uri)) + headers['X-Auth-Token'] = auth_result['x-auth-token'] response = self.request(full_uri, **kwargs) - - LOG.debug(_("Response => code %s") % (response.status)) + + http_status = response.status + LOG.debug(_("%(relative_uri)s => code %(http_status)s") % locals()) if check_response_status: - if not response.status in check_response_status: - if response.status == 404: + if not http_status in check_response_status: + if http_status == 404: raise OpenstackApiNotFoundException(response=response) else: raise OpenstackApiException( @@ -181,8 +188,9 @@ class TestOpenStackClient(object): def get_image(self, image_id): return self.api_get('/images/%s' % image_id)['image'] - def get_images_detail(self): - return self.api_get('/images/detail')['images'] + def get_images(self, detail=True): + rel_url = '/images/detail' if detail else '/images' + return self.api_get(rel_url)['images'] def post_image(self, image): return self.api_post('/images', image)['image'] @@ -193,8 +201,9 @@ class TestOpenStackClient(object): def get_flavor(self, flavor_id): return self.api_get('/flavors/%s' % flavor_id)['flavor'] - def get_flavors_detail(self): - return self.api_get('/flavors/detail')['flavors'] + def get_flavors(self, detail=True): + rel_url = '/flavors/detail' if detail else '/flavors' + return self.api_get(rel_url)['flavors'] def post_flavor(self, flavor): return self.api_post('/flavors', flavor)['flavor'] From f110e718807ea4747a0ff95138c488961257aa7f Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Thu, 10 Mar 2011 13:56:24 -0800 Subject: [PATCH 43/76] pep8 fun --- nova/tests/integrated/api/client.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index d424a6428299..da8d87e073f9 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -104,7 +104,7 @@ class TestOpenStackClient(object): 'X-Auth-Key': self.auth_key} response = self.request(auth_uri, headers=headers) - + http_status = response.status LOG.debug(_("%(auth_uri)s => code %(http_status)s") % locals()) @@ -131,7 +131,7 @@ class TestOpenStackClient(object): headers['X-Auth-Token'] = auth_result['x-auth-token'] response = self.request(full_uri, **kwargs) - + http_status = response.status LOG.debug(_("%(relative_uri)s => code %(http_status)s") % locals()) From be66b329d5b94ffbfb782355ef342eadbaed72a5 Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Thu, 10 Mar 2011 22:14:53 +0000 Subject: [PATCH 44/76] Fix a fer nits jaypipes found in review. 
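One detail worth pulling out of patch 42 before the next change: httplib reports response header names in lower case, so a dict keyed on the wire capitalization ('X-Auth-Token') silently misses. The defensive shape of the fix, as a standalone sketch (the helper name is invented; the patch itself just uses lower-case keys inline):

    def normalize_headers(header_pairs):
        """Key headers by lower-cased name, as httplib reports them."""
        return dict((name.lower(), value)
                    for name, value in header_pairs)

    auth = normalize_headers([('X-Auth-Token', 'abc123'),
                              ('X-Server-Management-Url',
                               'http://localhost:8774/v1.0/')])
    assert auth['x-auth-token'] == 'abc123'
    assert 'X-Auth-Token' not in auth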
--- nova/api/openstack/accounts.py | 18 +++++++++++++++--- nova/api/openstack/users.py | 4 ++-- nova/tests/api/openstack/test_accounts.py | 6 +++--- nova/tests/api/openstack/test_auth.py | 15 ++++----------- nova/tests/api/openstack/test_users.py | 2 +- 5 files changed, 25 insertions(+), 20 deletions(-) diff --git a/nova/api/openstack/accounts.py b/nova/api/openstack/accounts.py index 3b90d27765bb..dd88c3390e70 100644 --- a/nova/api/openstack/accounts.py +++ b/nova/api/openstack/accounts.py @@ -21,6 +21,7 @@ from nova import log as logging from nova import wsgi from nova.auth import manager +from nova.api.openstack import faults FLAGS = flags.FLAGS LOG = logging.getLogger('nova.api.openstack') @@ -44,11 +45,17 @@ class Controller(wsgi.Controller): self.manager = manager.AuthManager() def _check_admin(self, context): - """ We cannot depend on the db layer to check for admin access - for the auth manager, so we do it here """ + """We cannot depend on the db layer to check for admin access + for the auth manager, so we do it here""" if not context.is_admin: raise exception.NotAuthorized(_("Not admin user.")) + def index(self, req): + raise faults.Fault(exc.HTTPNotImplemented()) + + def detail(self, req): + raise faults.Fault(exc.HTTPNotImplemented()) + def show(self, req, id): """Return data about the given account id""" account = self.manager.get_project(id) @@ -59,8 +66,13 @@ class Controller(wsgi.Controller): self.manager.delete_project(id) return {} + def create(self, req): + """We use update with create-or-update semantics + because the id comes from an external source""" + raise faults.Fault(exc.HTTPNotImplemented()) + def update(self, req, id): - """ This is really create or update. """ + """This is really create or update.""" self._check_admin(req.environ['nova.context']) env = self._deserialize(req.body, req) description = env['account'].get('description') diff --git a/nova/api/openstack/users.py b/nova/api/openstack/users.py index 83ebec964cdf..5bb20a718226 100644 --- a/nova/api/openstack/users.py +++ b/nova/api/openstack/users.py @@ -45,8 +45,8 @@ class Controller(wsgi.Controller): self.manager = manager.AuthManager() def _check_admin(self, context): - """ We cannot depend on the db layer to check for admin access - for the auth manager, so we do it here """ + """We cannot depend on the db layer to check for admin access + for the auth manager, so we do it here""" if not context.is_admin: raise exception.NotAuthorized(_("Not admin user")) diff --git a/nova/tests/api/openstack/test_accounts.py b/nova/tests/api/openstack/test_accounts.py index 746f02f570ed..78fceb47c34e 100644 --- a/nova/tests/api/openstack/test_accounts.py +++ b/nova/tests/api/openstack/test_accounts.py @@ -14,9 +14,10 @@ # under the License. 
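For context on the index/detail/create stubs added to the accounts controller above: a route that exists but is deliberately unsupported should answer 501, not 404. The idea in miniature with bare webob (a simplification for illustration, not nova's faults.Fault implementation):

    import webob
    import webob.dec
    import webob.exc

    @webob.dec.wsgify
    def index(req):
        # Known-but-unsupported action: 501 Not Implemented.
        return webob.exc.HTTPNotImplemented()

    response = webob.Request.blank('/accounts').get_response(index)
    assert response.status_int == 501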
+import json + import stubout import webob -import json import nova.api import nova.api.openstack.auth @@ -47,8 +48,7 @@ class AccountsTest(test.TestCase): fake_init) self.stubs.Set(nova.api.openstack.accounts.Controller, '_check_admin', fake_admin_check) - fakes.FakeAuthManager.auth_data = {} - fakes.FakeAuthManager.projects = {} + fakes.FakeAuthManager.clear_fakes() fakes.FakeAuthDatabase.data = {} fakes.stub_out_networking(self.stubs) fakes.stub_out_rate_limiting(self.stubs) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 49f90879d7ef..437a79ec5b54 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -51,9 +51,7 @@ class Test(test.TestCase): def test_authorize_user(self): f = fakes.FakeAuthManager() - u = nova.auth.manager.User(1, 'herp', None, None, None) - f.add_user('derp', u) - f.create_project('test', u) + f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'herp' @@ -67,9 +65,7 @@ class Test(test.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() - u = nova.auth.manager.User(1, 'herp', None, None, None) - f.add_user('derp', u) - f.create_project('test', u) + f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'herp' @@ -86,7 +82,7 @@ class Test(test.TestCase): token = result.headers['X-Auth-Token'] self.stubs.Set(nova.api.openstack, 'APIRouter', fakes.FakeRouter) - req = webob.Request.blank('/v1.0/test/fake') + req = webob.Request.blank('/v1.0/fake') req.headers['X-Auth-Token'] = token result = req.get_response(fakes.wsgi_app()) self.assertEqual(result.status, '200 OK') @@ -180,9 +176,6 @@ class TestLimiter(test.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() - u = nova.auth.manager.User(1, 'herp', None, None, None) - f.add_user('derp', u) - f.create_project('test', u) f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) req = webob.Request.blank('/v1.0/') @@ -194,7 +187,7 @@ class TestLimiter(test.TestCase): token = result.headers['X-Auth-Token'] self.stubs.Set(nova.api.openstack, 'APIRouter', fakes.FakeRouter) - req = webob.Request.blank('/v1.0/test/fake') + req = webob.Request.blank'/v1.0/fake') req.method = 'POST' req.headers['X-Auth-Token'] = token result = req.get_response(fakes.wsgi_app()) diff --git a/nova/tests/api/openstack/test_users.py b/nova/tests/api/openstack/test_users.py index 14c7897f07e5..1edefe713f2f 100644 --- a/nova/tests/api/openstack/test_users.py +++ b/nova/tests/api/openstack/test_users.py @@ -13,10 +13,10 @@ # License for the specific language governing permissions and limitations # under the License. 
+import json import stubout import webob -import json import nova.api import nova.api.openstack.auth From f251ef70bf83eebce0f851f8a1b052174be1d615 Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Thu, 10 Mar 2011 22:20:51 +0000 Subject: [PATCH 45/76] fix minor typo --- nova/tests/api/openstack/test_auth.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index 437a79ec5b54..ff8d42a1410c 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -187,7 +187,7 @@ class TestLimiter(test.TestCase): token = result.headers['X-Auth-Token'] self.stubs.Set(nova.api.openstack, 'APIRouter', fakes.FakeRouter) - req = webob.Request.blank'/v1.0/fake') + req = webob.Request.blank('/v1.0/fake') req.method = 'POST' req.headers['X-Auth-Token'] = token result = req.get_response(fakes.wsgi_app()) From 7e95a65ccec2336176f389d614a85c9e70da374d Mon Sep 17 00:00:00 2001 From: Monsyne Dragon Date: Thu, 10 Mar 2011 22:33:45 +0000 Subject: [PATCH 46/76] re-added a test change I removed thinking it was related to removed code. It wasn't :> --- nova/tests/api/openstack/test_auth.py | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/nova/tests/api/openstack/test_auth.py b/nova/tests/api/openstack/test_auth.py index ff8d42a1410c..aaaa4e415932 100644 --- a/nova/tests/api/openstack/test_auth.py +++ b/nova/tests/api/openstack/test_auth.py @@ -65,7 +65,9 @@ class Test(test.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() - f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) + u = nova.auth.manager.User(1, 'herp', None, None, None) + f.add_user('derp', u) + f.create_project('test', u) req = webob.Request.blank('/v1.0/', {'HTTP_HOST': 'foo'}) req.headers['X-Auth-User'] = 'herp' @@ -176,7 +178,9 @@ class TestLimiter(test.TestCase): def test_authorize_token(self): f = fakes.FakeAuthManager() - f.add_user('derp', nova.auth.manager.User(1, 'herp', None, None, None)) + u = nova.auth.manager.User(1, 'herp', None, None, None) + f.add_user('derp', u) + f.create_project('test', u) req = webob.Request.blank('/v1.0/') req.headers['X-Auth-User'] = 'herp' From 2379fc056d96d56c852e94fe7c3898049a3670bc Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Thu, 10 Mar 2011 19:26:20 -0500 Subject: [PATCH 47/76] execvp: fix params --- .../etc/xensource/scripts/vif_rules.py | 33 +++++++++---------- 1 file changed, 16 insertions(+), 17 deletions(-) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py index d2b2d61e6c49..93aed298668b 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py @@ -71,13 +71,13 @@ def apply_iptables_rules(command, params): iptables = lambda *rule: execute('/sbin/iptables', *rule) iptables('-D', 'FORWARD', '-m', 'physdev', - '--physdev-in', '%(VIF)s' % params, - '-s', '%(IP)s' % params, + '--physdev-in', params['VIF'], + '-s', params['IP'], '-j', 'ACCEPT') if command == 'online': iptables('-A', 'FORWARD', '-m', 'physdev', - '--physdev-in', '%(VIF)s' % params, - '-s', '%(IP)s' % params, + '--physdev-in', params['VIF'], + '-s', params['IP'], '-j', 'ACCEPT') @@ -85,25 +85,24 @@ def apply_arptables_rules(command, params): arptables = lambda *rule: execute('/sbin/arptables', *rule) arptables('-D', 'FORWARD', '--opcode', 'Request', - '--in-interface', 
'%(VIF)s' % params, - '--source-ip', '%(IP)s' % params, - '--source-mac', '%(MAC)s' % params, + '--in-interface', params['VIF'], + '--source-ip', params['IP'], + '--source-mac', params['MAC'], '-j', 'ACCEPT') arptables('-D', 'FORWARD', '--opcode', 'Reply', - '--in-interface', '%(VIF)s' % params, - '--source-ip', '%(IP)s' % params, - '--source-mac', '%(MAC)s' % params, + '--in-interface', params['VIF'], + '--source-ip', params['IP'], + '--source-mac', params['MAC'], '-j', 'ACCEPT') if command == 'online': arptables('-A', 'FORWARD', '--opcode', 'Request', - '--in-interface', '%(VIF)s' % params - '--source-ip', '%(IP)s' % params, - '--source-mac', '%(MAC)s' % params, + '--in-interface', params['VIF'], + '--source-mac', params['MAC'], '-j', 'ACCEPT') arptables('-A', 'FORWARD', '--opcode', 'Reply', - '--in-interface', '%(VIF)s' % params, - '--source-ip', '%(IP)s' % params, - '--source-mac', '%(MAC)s' % params, + '--in-interface', params['VIF'], + '--source-ip', params['IP'], + '--source-mac', params['MAC'], '-j', 'ACCEPT') @@ -130,7 +129,7 @@ def apply_ebtables_rules(command, params): '-i', params['VIF'], '-j', 'DROP') if command == 'online': ebtables('-I', 'FORWARD', '1', '-s', '!', params['MAC'], - '-i', '%(VIF)s', '-j', 'DROP') + '-i', params['VIF'], '-j', 'DROP') if __name__ == "__main__": From b9a479ffc8e9db0c1888047d7f3df99b3b57b2ec Mon Sep 17 00:00:00 2001 From: Dan Prince Date: Thu, 10 Mar 2011 21:44:01 -0500 Subject: [PATCH 48/76] Make linux_net ensure_bridge commands that add and remove ip addr's from devices/bridges work with with the latest utils.execute method (execvp). --- nova/network/linux_net.py | 17 ++++++++++++----- 1 file changed, 12 insertions(+), 5 deletions(-) diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 9f9d282b6c7c..e69ed2f759bf 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -513,11 +513,9 @@ def ensure_bridge(bridge, interface, net_attrs=None): for line in out.split("\n"): fields = line.split() if fields and fields[0] == "inet": - params = ' '.join(fields[1:-1]) - _execute('sudo', 'ip', 'addr', - 'del', params, 'dev', fields[-1]) - _execute('sudo', 'ip', 'addr', - 'add', params, 'dev', bridge) + params = fields[1:-1] + _execute(*_ip_bridge_cmd('del', params, fields[-1])) + _execute(*_ip_bridge_cmd('add', params, bridge)) if gateway: _execute('sudo', 'route', 'add', '0.0.0.0', 'gw', gateway) out, err = _execute('sudo', 'brctl', 'addif', bridge, interface, @@ -739,3 +737,12 @@ def _ra_pid_for(bridge): if os.path.exists(pid_file): with open(pid_file, 'r') as f: return int(f.read()) + + +def _ip_bridge_cmd(action, params, device): + """Build commands to add/del ips to bridges/devices""" + + cmd = ['sudo', 'ip', 'addr', action] + cmd.extend(params) + cmd.extend(['dev', device]) + return cmd From 67a8d635af0a64ad220b163c00b96eadf7daf93f Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 11 Mar 2011 09:54:08 +0100 Subject: [PATCH 49/76] Make Authors check account for tests being run with different os.getcwd() depending on how they're run. Add missing people to Authors. 
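Before the Authors-file change, a note on the two execvp fixes just above: both replace %-interpolated command strings with argv lists, so each value travels as exactly one exec argument and shell-style word splitting cannot occur. The _ip_bridge_cmd helper boils down to this (standalone sketch):

    def ip_bridge_cmd(action, params, device):
        """Build an argv list to add or del an ip on a bridge/device."""
        cmd = ['sudo', 'ip', 'addr', action]
        cmd.extend(params)           # each param is its own argument
        cmd.extend(['dev', device])
        return cmd

    assert ip_bridge_cmd('add', ['10.0.0.1/24'], 'br100') == \
        ['sudo', 'ip', 'addr', 'add', '10.0.0.1/24', 'dev', 'br100']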
--- .mailmap | 1 + Authors | 1 + nova/tests/test_misc.py | 12 +++++++----- 3 files changed, 9 insertions(+), 5 deletions(-) diff --git a/.mailmap b/.mailmap index ed4404ad57f0..ccf2109a79ef 100644 --- a/.mailmap +++ b/.mailmap @@ -28,6 +28,7 @@ + diff --git a/Authors b/Authors index 7993955e2f0d..4ee0643cfe18 100644 --- a/Authors +++ b/Authors @@ -19,6 +19,7 @@ Devin Carlen Ed Leafe Eldar Nugaev Eric Day +Eric Windisch Ewan Mellor Hisaharu Ishii Hisaki Ohara diff --git a/nova/tests/test_misc.py b/nova/tests/test_misc.py index a658e49788a1..1fbaf304f85c 100644 --- a/nova/tests/test_misc.py +++ b/nova/tests/test_misc.py @@ -24,18 +24,19 @@ from nova.utils import parse_mailmap, str_dict_replace, synchronized class ProjectTestCase(test.TestCase): def test_authors_up_to_date(self): - if os.path.exists('.bzr'): + topdir = os.path.normpath(os.path.dirname(__file__) + '/../../') + if os.path.exists(os.path.join(topdir, '.bzr')): contributors = set() - mailmap = parse_mailmap('.mailmap') + mailmap = parse_mailmap(os.path.join(topdir, '.mailmap')) import bzrlib.workingtree - tree = bzrlib.workingtree.WorkingTree.open('.') + tree = bzrlib.workingtree.WorkingTree.open(topdir) tree.lock_read() try: parents = tree.get_parent_ids() g = tree.branch.repository.get_graph() - for p in parents[1:]: + for p in parents: rev_ids = [r for r, _ in g.iter_ancestry(parents) if r != "null:"] revs = tree.branch.repository.get_revisions(rev_ids) @@ -44,7 +45,8 @@ class ProjectTestCase(test.TestCase): email = author.split(' ')[-1] contributors.add(str_dict_replace(email, mailmap)) - authors_file = open('Authors', 'r').read() + authors_file = open(os.path.join(topdir, 'Authors'), + 'r').read() missing = set() for contributor in contributors: From 46d1f6a8c888c1f6fdf12cf26df67eada1e8505b Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 11 Mar 2011 11:24:22 +0100 Subject: [PATCH 50/76] Use self.instances.pop in unfilter_instance to make the check/removal atomic. Move the semaphore grab outside the for loop in refresh_security_group_rules to avoid reading a value from self.instances, blocking waiting for the semaphore, having the instance be removed in the mean time, and then add its rules back. --- nova/virt/libvirt_conn.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index b74ed25f97c8..d82b33ddd7c8 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1238,13 +1238,12 @@ class IptablesFirewallDriver(FirewallDriver): pass def unfilter_instance(self, instance): - if instance['id'] in self.instances: - del self.instances[instance['id']] + if self.instances.pop(instance['id'], False): self.remove_filters_for_instance(instance) self.iptables.apply() else: LOG.info(_('Attempted to unfilter instance %s which is not ' - 'filtered'), instance['id']) + 'filtered'), instance['id']) def prepare_instance_filter(self, instance): self.instances[instance['id']] = instance @@ -1387,11 +1386,11 @@ class IptablesFirewallDriver(FirewallDriver): pass def refresh_security_group_rules(self, security_group): - for instance in self.instances.values(): - # We use the semaphore to make sure noone applies the rule set - # after we've yanked the existing rules but before we've put in - # the new ones. - with self.iptables.semaphore: + # We use the semaphore to make sure noone applies the rule set + # after we've yanked the existing rules but before we've put in + # the new ones. 
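That comment is the heart of this hunk: the lock now wraps the whole loop, making the remove/add pair one critical section for every instance instead of a sequence of per-instance ones. In miniature (threading.Semaphore stands in for nova's iptables semaphore; the callables are placeholders):

    import threading

    lock = threading.Semaphore()
    instances = {'i-1': object(), 'i-2': object()}

    def refresh(remove_filters, add_filters):
        # One acquisition covers the full rewrite, so no other thread
        # can apply a half-rebuilt rule set between iterations.
        with lock:
            for instance in instances.values():
                remove_filters(instance)
                add_filters(instance)

    refresh(lambda inst: None, lambda inst: None)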
+ with self.iptables.semaphore: + for instance in self.instances.values(): self.remove_filters_for_instance(instance) self.add_filters_for_instance(instance) self.iptables.apply() From d250d522b5d6c164435fc254223ff9a0f055cf05 Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 11 Mar 2011 17:55:28 +0100 Subject: [PATCH 51/76] Remove broken test. At least this way, it'll actually fix the problem and be mergable. --- nova/tests/test_cloud.py | 12 ------------ 1 file changed, 12 deletions(-) diff --git a/nova/tests/test_cloud.py b/nova/tests/test_cloud.py index db7c15aeb5b1..cf8ee7eff723 100644 --- a/nova/tests/test_cloud.py +++ b/nova/tests/test_cloud.py @@ -353,18 +353,6 @@ class CloudTestCase(test.TestCase): self.assertEqual('', img.metadata['description']) shutil.rmtree(pathdir) - def test_metadata_works_without_kernel_and_ramdisk(self): - inst = db.instance_create(self.context, {'host': self.compute.host, - 'vcpus': 2, - 'image_id': '123456', - 'user_data': '' }) - fixed = self.network.allocate_fixed_ip(self.context, inst['id']) - try: - self.cloud.get_metadata(fixed) - finally: - self.network.deallocate_fixed_ip(self.context, fixed) - db.instance_destroy(self.context, inst['id']) - def test_update_of_instance_display_fields(self): inst = db.instance_create(self.context, {}) ec2_id = ec2utils.id_to_ec2_id(inst['id']) From 65f6648f61cb6eeb5cd109fe08ef2ab2f3646c8b Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 11 Mar 2011 12:09:20 -0500 Subject: [PATCH 52/76] cast execute commands to str --- plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py | 1 + 1 file changed, 1 insertion(+) diff --git a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py index 93aed298668b..48122e6d6400 100755 --- a/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py +++ b/plugins/xenserver/networking/etc/xensource/scripts/vif_rules.py @@ -54,6 +54,7 @@ def main(dom_id, command, only_this_vif=None): def execute(*command, return_stdout=False): devnull = open(os.devnull, 'w') + command = map(str, command) proc = subprocess.Popen(command, close_fds=True, stdout=subprocess.PIPE, stderr=devnull) devnull.close() From 36b5f7d9cf377ce2a4dcdad07e7e14062cd3ec4d Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 11:22:23 -0600 Subject: [PATCH 53/76] Further vmops cleanup --- nova/virt/xenapi/vmops.py | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 5375df5b4f66..f012fa446e67 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -536,10 +536,10 @@ class VMOps(object): """ instance_id = instance.id LOG.info(_("Destroying VM for Instance %(instance_id)s") % locals()) - vm = VMHelper.lookup(self._session, instance.name) - return self._destroy(instance, vm, shutdown=True) + vm_ref = VMHelper.lookup(self._session, instance.name) + return self._destroy(instance, vm_ref, shutdown=True) - def _destroy(self, instance, vm, shutdown=True, + def _destroy(self, instance, vm_ref, shutdown=True, destroy_kernel_ramdisk=True): """ Destroys VM instance by performing: @@ -549,17 +549,17 @@ class VMOps(object): 3. Destroying kernel and ramdisk files (if necessary) 4. 
Destroying that actual VM record """ - if vm is None: + if vm_ref is None: LOG.warning(_("VM is not present, skipping destroy...")) return if shutdown: - self._shutdown(instance, vm) + self._shutdown(instance, vm_ref) - self._destroy_vdis(instance, vm) + self._destroy_vdis(instance, vm_ref) if destroy_kernel_ramdisk: - self._destroy_kernel_ramdisk(instance, vm) - self._destroy_vm(instance, vm) + self._destroy_kernel_ramdisk(instance, vm_ref) + self._destroy_vm(instance, vm_ref) def _wait_with_callback(self, instance_id, task, callback): ret = None From cfc7d21b959bc929295868aeb3e84ea56afbfd9c Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Fri, 11 Mar 2011 17:41:22 +0000 Subject: [PATCH 54/76] Discovered literal_column(), which does exactly what I need --- nova/db/sqlalchemy/api.py | 49 ++++++++++++--------------------------- 1 file changed, 15 insertions(+), 34 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 88125aaf5c01..431cf6e8e722 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -34,6 +34,7 @@ from sqlalchemy.orm import joinedload from sqlalchemy.orm import joinedload_all from sqlalchemy.sql import exists from sqlalchemy.sql import func +from sqlalchemy.sql.expression import literal_column FLAGS = flags.FLAGS @@ -702,28 +703,16 @@ def instance_data_get_for_project(context, project_id): def instance_destroy(context, instance_id): session = get_session() with session.begin(): - session.execute('update instances set deleted=1,' - 'deleted_at=:at where id=:id', - {'id': instance_id, - 'at': datetime.datetime.utcnow()}) - # NOTE(klmitch): for some reason, using the SQLAlchemy code - # here instead of the direct SQL update above causes the - # test_run_terminate_timestamps test (and only that one) to - # fail with an obscure TypeError exception from deep within - # SQLAlchemy; the nearest nova function in the traceback is - # instance_get() - # session.query(models.Instance).\ - # filter_by(id=instance_id).\ - # update({'deleted': 1, - # 'deleted_at': datetime.datetime.utcnow(), - # 'updated_at': models.Instance.updated_at + 0}) + session.query(models.Instance).\ + filter_by(id=instance_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(instance_id=instance_id).\ update({'deleted': 1, 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': - (models.SecurityGroupInstanceAssociation. 
- updated_at + 0)}) + 'updated_at': literal_column('updated_at')}) @require_context @@ -969,7 +958,7 @@ def key_pair_destroy_all_by_user(context, user_id): filter_by(user_id=user_id).\ update({'deleted': 1, 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': models.KeyPair.updated_at + 0}) + 'updated_at': literal_column('updated_at')}) @require_context @@ -1082,7 +1071,7 @@ def network_disassociate_all(context): session = get_session() session.query(models.Network).\ update({'project_id': None, - 'updated_at': models.Network.updated_at + 0}) + 'updated_at': literal_column('updated_at')}) @require_context @@ -1456,7 +1445,7 @@ def volume_destroy(context, volume_id): filter_by(id=volume_id).\ update({'deleted': 1, 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': models.Volume.updated_at + 0}) + 'updated_at': literal_column('updated_at')}) session.query(models.ExportDevice).\ filter_by(volume_id=volume_id).\ update({'volume_id': None}) @@ -1686,22 +1675,17 @@ def security_group_destroy(context, security_group_id): filter_by(id=security_group_id).\ update({'deleted': 1, 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': - models.SecurityGroup.updated_at + 0}) + 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ filter_by(security_group_id=security_group_id).\ update({'deleted': 1, 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': - (models.SecurityGroupInstanceAssociation. - updated_at + 0)}) + 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ filter_by(group_id=security_group_id).\ update({'deleted': 1, 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': - (models.SecurityGroupIngressRule. - updated_at + 0)}) + 'updated_at': literal_column('updated_at')}) @require_context @@ -1712,14 +1696,11 @@ def security_group_destroy_all(context, session=None): session.query(models.SecurityGroup).\ update({'deleted': 1, 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': - models.SecurityGroup.updated_at + 0}) + 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ update({'deleted': 1, 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': - (models.SecurityGroupIngressRule. - updated_at + 0)}) + 'updated_at': literal_column('updated_at')}) ################### From 195926d0635c0217edccf1cd763425163d3e92e7 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Fri, 11 Mar 2011 19:22:31 +0000 Subject: [PATCH 55/76] Minor stylistic updates affecting indentation --- nova/db/sqlalchemy/api.py | 132 +++++++++++++++++++------------------- 1 file changed, 66 insertions(+), 66 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 89745aa9501d..08bc8fe2f623 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -581,15 +581,15 @@ def fixed_ip_disassociate_all_by_timeout(_context, host, time): # NOTE(vish): The nested select is because sqlite doesn't support # JOINs in UPDATEs. 
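Patch 55 here only re-indents the literal_column() change from patch 54, so a note on it is in order: literal_column('updated_at') renders verbatim, so the soft-delete UPDATE sets updated_at = updated_at and leaves the column's value alone, without the datetime-plus-zero arithmetic that broke under SQLite. A standalone rendering check:

    from sqlalchemy.sql import column, table
    from sqlalchemy.sql.expression import literal_column

    instances = table('instances',
                      column('deleted'), column('updated_at'))

    # Renders roughly:
    #   UPDATE instances SET deleted=:deleted, updated_at=updated_at
    stmt = instances.update().values(
        deleted=1,
        updated_at=literal_column('updated_at'))
    print(str(stmt))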
inner_q = session.query(models.Network.id).\ - filter_by(host=host).\ - subquery() + filter_by(host=host).\ + subquery() result = session.query(models.FixedIp).\ - filter(models.FixedIp.network_id.in_(inner_q)).\ - filter(models.FixedIp.updated_at < time).\ - filter(models.FixedIp.instance_id != None).\ - filter_by(allocated=0).\ - update({'instance_id': None, - 'leased': 0}) + filter(models.FixedIp.network_id.in_(inner_q)).\ + filter(models.FixedIp.updated_at < time).\ + filter(models.FixedIp.instance_id != None).\ + filter_by(allocated=0).\ + update({'instance_id': None, + 'leased': 0}) return result @@ -704,15 +704,15 @@ def instance_destroy(context, instance_id): session = get_session() with session.begin(): session.query(models.Instance).\ - filter_by(id=instance_id).\ - update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': literal_column('updated_at')}) + filter_by(id=instance_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ - filter_by(instance_id=instance_id).\ - update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': literal_column('updated_at')}) + filter_by(instance_id=instance_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) @require_context @@ -955,10 +955,10 @@ def key_pair_destroy_all_by_user(context, user_id): session = get_session() with session.begin(): session.query(models.KeyPair).\ - filter_by(user_id=user_id).\ - update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': literal_column('updated_at')}) + filter_by(user_id=user_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) @require_context @@ -1079,8 +1079,8 @@ def network_disassociate(context, network_id): def network_disassociate_all(context): session = get_session() session.query(models.Network).\ - update({'project_id': None, - 'updated_at': literal_column('updated_at')}) + update({'project_id': None, + 'updated_at': literal_column('updated_at')}) @require_context @@ -1463,16 +1463,16 @@ def volume_destroy(context, volume_id): session = get_session() with session.begin(): session.query(models.Volume).\ - filter_by(id=volume_id).\ - update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': literal_column('updated_at')}) + filter_by(id=volume_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) session.query(models.ExportDevice).\ - filter_by(volume_id=volume_id).\ - update({'volume_id': None}) + filter_by(volume_id=volume_id).\ + update({'volume_id': None}) session.query(models.IscsiTarget).\ - filter_by(volume_id=volume_id).\ - update({'volume_id': None}) + filter_by(volume_id=volume_id).\ + update({'volume_id': None}) @require_admin_context @@ -1693,20 +1693,20 @@ def security_group_destroy(context, security_group_id): session = get_session() with session.begin(): session.query(models.SecurityGroup).\ - filter_by(id=security_group_id).\ - update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': literal_column('updated_at')}) + filter_by(id=security_group_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupInstanceAssociation).\ - 
filter_by(security_group_id=security_group_id).\ - update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': literal_column('updated_at')}) + filter_by(security_group_id=security_group_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ - filter_by(group_id=security_group_id).\ - update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': literal_column('updated_at')}) + filter_by(group_id=security_group_id).\ + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) @require_context @@ -1715,13 +1715,13 @@ def security_group_destroy_all(context, session=None): session = get_session() with session.begin(): session.query(models.SecurityGroup).\ - update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': literal_column('updated_at')}) + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) session.query(models.SecurityGroupIngressRule).\ - update({'deleted': 1, - 'deleted_at': datetime.datetime.utcnow(), - 'updated_at': literal_column('updated_at')}) + update({'deleted': 1, + 'deleted_at': datetime.datetime.utcnow(), + 'updated_at': literal_column('updated_at')}) ################### @@ -1851,14 +1851,14 @@ def user_delete(context, id): session = get_session() with session.begin(): session.query(models.UserProjectAssociation).\ - filter_by(user_id=id).\ - delete() + filter_by(user_id=id).\ + delete() session.query(models.UserRoleAssociation).\ - filter_by(user_id=id).\ - delete() + filter_by(user_id=id).\ + delete() session.query(models.UserProjectRoleAssociation).\ - filter_by(user_id=id).\ - delete() + filter_by(user_id=id).\ + delete() user_ref = user_get(context, id, session=session) session.delete(user_ref) @@ -1950,11 +1950,11 @@ def project_delete(context, id): session = get_session() with session.begin(): session.query(models.UserProjectAssociation).\ - filter_by(project_id=id).\ - delete() + filter_by(project_id=id).\ + delete() session.query(models.UserProjectRoleAssociation).\ - filter_by(project_id=id).\ - delete() + filter_by(project_id=id).\ + delete() project_ref = project_get(context, id, session=session) session.delete(project_ref) @@ -1980,10 +1980,10 @@ def user_remove_project_role(context, user_id, project_id, role): session = get_session() with session.begin(): session.query(models.UserProjectRoleAssociation).\ - filter_by(user_id=user_id).\ - filter_by(project_id=project_id).\ - filter_by(role=role).\ - delete() + filter_by(user_id=user_id).\ + filter_by(project_id=project_id).\ + filter_by(role=role).\ + delete() def user_remove_role(context, user_id, role): @@ -2135,8 +2135,8 @@ def console_delete(context, console_id): with session.begin(): # consoles are meant to be transient. (mdragon) session.query(models.Console).\ - filter_by(id=console_id).\ - delete() + filter_by(id=console_id).\ + delete() def console_get_by_pool_instance(context, pool_id, instance_id): @@ -2293,8 +2293,8 @@ def zone_delete(context, zone_id): session = get_session() with session.begin(): session.query(models.Zone).\ - filter_by(id=zone_id).\ - delete() + filter_by(id=zone_id).\ + delete() @require_admin_context From 0a130010c26e4ef9ba5b9917ff47766de7805ab9 Mon Sep 17 00:00:00 2001 From: Eric Windisch Date: Fri, 11 Mar 2011 14:24:03 -0500 Subject: [PATCH 56/76] process_input for tee. 
fixes: 733439 --- nova/virt/disk.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/disk.py b/nova/virt/disk.py index 5d499c42c527..9abe44cc3ea4 100644 --- a/nova/virt/disk.py +++ b/nova/virt/disk.py @@ -189,4 +189,4 @@ def _inject_net_into_fs(net, fs): utils.execute('sudo', 'chown', 'root:root', netdir) utils.execute('sudo', 'chmod', 755, netdir) netfile = os.path.join(netdir, 'interfaces') - utils.execute('sudo', 'tee', netfile, net) + utils.execute('sudo', 'tee', netfile, process_input=net) From 2ac7fa75c02c885fc9d4dfacba8318aadbdbfceb Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 11 Mar 2011 23:34:26 +0100 Subject: [PATCH 57/76] Indentation adjustment (cosmetical). --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index d82b33ddd7c8..678331eede5b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1243,7 +1243,7 @@ class IptablesFirewallDriver(FirewallDriver): self.iptables.apply() else: LOG.info(_('Attempted to unfilter instance %s which is not ' - 'filtered'), instance['id']) + 'filtered'), instance['id']) def prepare_instance_filter(self, instance): self.instances[instance['id']] = instance From a9d71273742f440af5687650dd9cd72d827a6bef Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Fri, 11 Mar 2011 23:36:28 +0100 Subject: [PATCH 58/76] Make the fallback value None instead of False --- nova/virt/libvirt_conn.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 678331eede5b..d2061a0ca53b 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -1238,7 +1238,7 @@ class IptablesFirewallDriver(FirewallDriver): pass def unfilter_instance(self, instance): - if self.instances.pop(instance['id'], False): + if self.instances.pop(instance['id'], None): self.remove_filters_for_instance(instance) self.iptables.apply() else: From b3f5a4d5a8e513fe65a3b1dde9b36fd1388afb67 Mon Sep 17 00:00:00 2001 From: "Kevin L. Mitchell" Date: Fri, 11 Mar 2011 22:55:56 +0000 Subject: [PATCH 59/76] Remove vish comment --- nova/db/sqlalchemy/api.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index 08bc8fe2f623..71b85d659a70 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -578,8 +578,6 @@ def fixed_ip_disassociate(context, address): @require_admin_context def fixed_ip_disassociate_all_by_timeout(_context, host, time): session = get_session() - # NOTE(vish): The nested select is because sqlite doesn't support - # JOINs in UPDATEs. 
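Looking back at patch 56 for a moment: the bug was that the file content rode along as an extra argv element to tee, when it has to arrive on stdin. The corrected call, rewritten standalone with subprocess in place of nova's utils.execute (assumes sudo and tee exist on the host):

    import subprocess

    def write_file_as_root(path, contents):
        # utils.execute('sudo', 'tee', path, process_input=contents)
        # feeds `contents` to tee's stdin; the same thing by hand:
        proc = subprocess.Popen(['sudo', 'tee', path],
                                stdin=subprocess.PIPE,
                                stdout=subprocess.PIPE)
        proc.communicate(contents)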
inner_q = session.query(models.Network.id).\ filter_by(host=host).\ subquery() From da76b3d67b2c2e864025c4ba201b63e1dee2ff1f Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 16:58:18 -0600 Subject: [PATCH 60/76] Review feedback --- nova/virt/xenapi/vm_utils.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index a42cabfe453d..866eb5d62f8e 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -101,7 +101,7 @@ class VMHelper(HelperBase): get_instance_type(instance.instance_type) mem = str(long(instance_type['memory_mb']) * 1024 * 1024) vcpus = str(instance_type['vcpus']) - vm_rec = { + rec = { 'actions_after_crash': 'destroy', 'actions_after_reboot': 'restart', 'actions_after_shutdown': 'destroy', @@ -145,21 +145,21 @@ class VMHelper(HelperBase): vm_rec['platform']['nx'] = 'false' if instance.kernel_id: # 1. Kernel explicitly passed in, use that - vm_rec['PV_args'] = 'root=/dev/xvda1' - vm_rec['PV_kernel'] = kernel - vm_rec['PV_ramdisk'] = ramdisk + rec['PV_args'] = 'root=/dev/xvda1' + rec['PV_kernel'] = kernel + rec['PV_ramdisk'] = ramdisk else: # 2. Use kernel within the image - vm_rec['PV_args'] = 'clocksource=jiffies' - vm_rec['PV_bootloader'] = 'pygrub' + rec['PV_args'] = 'clocksource=jiffies' + rec['PV_bootloader'] = 'pygrub' else: # 3. Using hardware virtualization - vm_rec['platform']['nx'] = 'true' - vm_rec['HVM_boot_params'] = {'order': 'dc'} - vm_rec['HVM_boot_policy'] = 'BIOS order' + rec['platform']['nx'] = 'true' + rec['HVM_boot_params'] = {'order': 'dc'} + rec['HVM_boot_policy'] = 'BIOS order' LOG.debug(_('Created VM %s...'), instance.name) - vm_ref = session.call_xenapi('VM.create', vm_rec) + vm_ref = session.call_xenapi('VM.create', rec) instance_name = instance.name LOG.debug(_('Created VM %(instance_name)s as %(vm_ref)s.') % locals()) return vm_ref From 80bc32659e41f496bb1bfefbdd6ca63de7ff9f98 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 17:11:25 -0600 Subject: [PATCH 61/76] Review feedback --- nova/virt/xenapi/vmops.py | 61 +++++++++++++++++++++------------------ 1 file changed, 33 insertions(+), 28 deletions(-) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 190c2022d27e..3ccdf9d80caf 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -434,7 +434,7 @@ class VMOps(object): raise RuntimeError(resp_dict['message']) return resp_dict['message'] - def _shutdown(self, instance, vm, hard=True): + def _shutdown(self, instance, vm_ref, hard=True): """Shutdown an instance""" state = self.get_info(instance['name'])['state'] if state == power_state.SHUTDOWN: @@ -448,31 +448,33 @@ class VMOps(object): try: task = None if hard: - task = self._session.call_xenapi("Async.VM.hard_shutdown", vm) + task = self._session.call_xenapi("Async.VM.hard_shutdown", + vm_ref) else: - task = self._session.call_xenapi('Async.VM.clean_shutdown', vm) + task = self._session.call_xenapi("Async.VM.clean_shutdown", + vm_ref) self._session.wait_for_task(task, instance.id) except self.XenAPI.Failure, exc: LOG.exception(exc) - def _destroy_vdis(self, instance, vm): - """Destroys all VDIs associated with a VM """ + def _destroy_vdis(self, instance, vm_ref): + """Destroys all VDIs associated with a VM""" instance_id = instance.id LOG.debug(_("Destroying VDIs for Instance %(instance_id)s") % locals()) - vdis = VMHelper.lookup_vm_vdis(self._session, vm) + vdi_refs = 
VMHelper.lookup_vm_vdis(self._session, vm_ref) - if not vdis: + if not vdi_refs: return - for vdi in vdis: + for vdi_ref in vdi_refs: try: - task = self._session.call_xenapi('Async.VDI.destroy', vdi) + task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref) self._session.wait_for_task(task, instance.id) except self.XenAPI.Failure, exc: LOG.exception(exc) - def _destroy_kernel_ramdisk(self, instance, vm): + def _destroy_kernel_ramdisk(self, instance, vm_ref): """ Three situations can occur: @@ -499,8 +501,8 @@ class VMOps(object): "both" % locals())) # 3. We have both kernel and ramdisk - (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk( - self._session, vm) + (kernel, ramdisk) = VMHelper.lookup_kernel_ramdisk(self._session, + vm_ref) LOG.debug(_("Removing kernel/ramdisk files")) @@ -511,11 +513,11 @@ class VMOps(object): LOG.debug(_("kernel/ramdisk files removed")) - def _destroy_vm(self, instance, vm): - """Destroys a VM record """ + def _destroy_vm(self, instance, vm_ref): + """Destroys a VM record""" instance_id = instance.id try: - task = self._session.call_xenapi('Async.VM.destroy', vm) + task = self._session.call_xenapi('Async.VM.destroy', vm_ref) self._session.wait_for_task(task, instance_id) except self.XenAPI.Failure, exc: LOG.exception(exc) @@ -596,8 +598,9 @@ class VMOps(object): - spawn a rescue VM (the vm name-label will be instance-N-rescue) """ - rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue") - if rescue_vm: + rescue_vm_ref = VMHelper.lookup(self._session, + instance.name + "-rescue") + if rescue_vm_ref: raise RuntimeError(_( "Instance is already in Rescue Mode: %s" % instance.name)) @@ -609,8 +612,8 @@ class VMOps(object): self.spawn(instance) rescue_vm_ref = self._get_vm_opaque_ref(instance) - vbd = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] - vdi_ref = self._session.get_xenapi().VBD.get_record(vbd)["VDI"] + vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] + vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, vdi_ref, 1, False) @@ -623,35 +626,37 @@ class VMOps(object): - release the bootlock to allow the instance VM to start """ - rescue_vm = VMHelper.lookup(self._session, instance.name + "-rescue") + rescue_vm_ref = VMHelper.lookup(self._session, + instance.name + "-rescue") - if not rescue_vm: + if not rescue_vm_ref: raise exception.NotFound(_( "Instance is not in Rescue Mode: %s" % instance.name)) original_vm_ref = self._get_vm_opaque_ref(instance) - vbds = self._session.get_xenapi().VM.get_VBDs(rescue_vm) + vbd_refs = self._session.get_xenapi().VM.get_VBDs(rescue_vm_ref) instance._rescue = False - for vbd_ref in vbds: + for vbd_ref in vbd_refs: vbd = self._session.get_xenapi().VBD.get_record(vbd_ref) if vbd["userdevice"] == "1": VMHelper.unplug_vbd(self._session, vbd_ref) VMHelper.destroy_vbd(self._session, vbd_ref) - task1 = self._session.call_xenapi("Async.VM.hard_shutdown", rescue_vm) + task1 = self._session.call_xenapi("Async.VM.hard_shutdown", + rescue_vm_ref) self._session.wait_for_task(task1, instance.id) - vdis = VMHelper.lookup_vm_vdis(self._session, rescue_vm) - for vdi in vdis: + vdi_refs = VMHelper.lookup_vm_vdis(self._session, rescue_vm_ref) + for vdi_ref in vdi_refs: try: - task = self._session.call_xenapi('Async.VDI.destroy', vdi) + task = self._session.call_xenapi('Async.VDI.destroy', vdi_ref) self._session.wait_for_task(task, instance.id) except self.XenAPI.Failure: continue - task2 = 
self._session.call_xenapi('Async.VM.destroy', rescue_vm) + task2 = self._session.call_xenapi('Async.VM.destroy', rescue_vm_ref) self._session.wait_for_task(task2, instance.id) self._release_bootlock(original_vm_ref) From ab37248cc2e40b06e1d349833da01494a9ca3641 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 17:13:10 -0600 Subject: [PATCH 62/76] oops --- nova/virt/xenapi/vm_utils.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 866eb5d62f8e..8dd2461780b0 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -142,7 +142,7 @@ class VMHelper(HelperBase): # Complete VM configuration record according to the image type # non-raw/raw with PV kernel/raw in HVM mode if use_pv_kernel: - vm_rec['platform']['nx'] = 'false' + rec['platform']['nx'] = 'false' if instance.kernel_id: # 1. Kernel explicitly passed in, use that rec['PV_args'] = 'root=/dev/xvda1' From 29e59a96602265c5b5746865db94a3f00b8b5cf5 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Fri, 11 Mar 2011 15:21:31 -0800 Subject: [PATCH 63/76] Change capitalization of Openstack to OpenStack --- nova/tests/integrated/api/client.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/nova/tests/integrated/api/client.py b/nova/tests/integrated/api/client.py index da8d87e073f9..245eb8c69b36 100644 --- a/nova/tests/integrated/api/client.py +++ b/nova/tests/integrated/api/client.py @@ -24,7 +24,7 @@ from nova import log as logging LOG = logging.getLogger('nova.tests.api') -class OpenstackApiException(Exception): +class OpenStackApiException(Exception): def __init__(self, message=None, response=None): self.response = response if not message: @@ -37,22 +37,22 @@ class OpenstackApiException(Exception): message = _('%(message)s\nStatus Code: %(_status)s\n' 'Body: %(_body)s') % locals() - super(OpenstackApiException, self).__init__(message) + super(OpenStackApiException, self).__init__(message) -class OpenstackApiAuthenticationException(OpenstackApiException): +class OpenStackApiAuthenticationException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = _("Authentication error") - super(OpenstackApiAuthenticationException, self).__init__(message, + super(OpenStackApiAuthenticationException, self).__init__(message, response) -class OpenstackApiNotFoundException(OpenstackApiException): +class OpenStackApiNotFoundException(OpenStackApiException): def __init__(self, response=None, message=None): if not message: message = _("Item not found") - super(OpenstackApiNotFoundException, self).__init__(message, response) + super(OpenStackApiNotFoundException, self).__init__(message, response) class TestOpenStackClient(object): @@ -82,7 +82,7 @@ class TestOpenStackClient(object): conn = httplib.HTTPSConnection(hostname, port=port) else: - raise OpenstackApiException("Unknown scheme: %s" % url) + raise OpenStackApiException("Unknown scheme: %s" % url) relative_url = parsed_url.path if parsed_url.query: @@ -111,7 +111,7 @@ class TestOpenStackClient(object): # Until bug732866 is fixed, we can't check this properly... 
#if http_status == 401: if http_status != 204: - raise OpenstackApiAuthenticationException(response=response) + raise OpenStackApiAuthenticationException(response=response) auth_headers = {} for k, v in response.getheaders(): @@ -138,9 +138,9 @@ class TestOpenStackClient(object): if check_response_status: if not http_status in check_response_status: if http_status == 404: - raise OpenstackApiNotFoundException(response=response) + raise OpenStackApiNotFoundException(response=response) else: - raise OpenstackApiException( + raise OpenStackApiException( message=_("Unexpected status code"), response=response) From f9706b5080786a4d3e530f3e8bdb69147e9f5086 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 17:35:37 -0600 Subject: [PATCH 64/76] Review feedback --- nova/virt/xenapi/vm_utils.py | 31 ++++++++++++++++--------------- nova/virt/xenapi/vmops.py | 6 +++--- 2 files changed, 19 insertions(+), 18 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 8dd2461780b0..7aa3f3c3be0e 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -633,37 +633,38 @@ class VMHelper(HelperBase): return is_pv @classmethod - def lookup(cls, session, i): + def lookup(cls, session, name_label): """Look the instance i up, and returns it if available""" - vms = session.get_xenapi().VM.get_by_name_label(i) - n = len(vms) + vm_refs = session.get_xenapi().VM.get_by_name_label(name_label) + n = len(vm_refs) if n == 0: return None elif n > 1: - raise exception.Duplicate(_('duplicate name found: %s') % i) + raise exception.Duplicate(_('duplicate name found: %s') % + name_label) else: - return vms[0] + return vm_refs[0] @classmethod - def lookup_vm_vdis(cls, session, vm): + def lookup_vm_vdis(cls, session, vm_ref): """Look for the VDIs that are attached to the VM""" # Firstly we get the VBDs, then the VDIs. # TODO(Armando): do we leave the read-only devices? 
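The long run of renames in these review-feedback patches is mechanical but has a point: a XenAPI call returns an opaque reference string, and a second round trip turns that reference into a record, a plain dict of fields. The *_ref/*_rec suffixes keep the two from being confused. A toy stand-in (FakeXenAPI is invented for illustration):

    class FakeXenAPI(object):
        """Invented stand-in for a XenAPI session."""
        _records = {'OpaqueRef:1': {'name_label': 'instance-1',
                                    'is_a_template': False}}

        def get_all(self):
            return list(self._records)     # opaque *_ref strings

        def get_record(self, ref):
            return self._records[ref]      # *_rec: dict of fields

    session = FakeXenAPI()
    for vm_ref in session.get_all():
        vm_rec = session.get_record(vm_ref)
        assert not vm_rec['is_a_template']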
- vbds = session.get_xenapi().VM.get_VBDs(vm) - vdis = [] - if vbds: - for vbd in vbds: + vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref) + vdi_refs = [] + if vbd_refs: + for vbd_ref in vbd_refs: try: - vdi = session.get_xenapi().VBD.get_VDI(vbd) + vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref) # Test valid VDI - record = session.get_xenapi().VDI.get_record(vdi) + record = session.get_xenapi().VDI.get_record(vdi_ref) LOG.debug(_('VDI %s is still available'), record['uuid']) except cls.XenAPI.Failure, exc: LOG.exception(exc) else: - vdis.append(vdi) - if len(vdis) > 0: - return vdis + vdi_refs.append(vdi_ref) + if len(vdi_refs) > 0: + return vdi_refs else: return None diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 3ccdf9d80caf..0faec11695ed 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -614,10 +614,10 @@ class VMOps(object): vbd_ref = self._session.get_xenapi().VM.get_VBDs(vm_ref)[0] vdi_ref = self._session.get_xenapi().VBD.get_record(vbd_ref)["VDI"] - vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, vdi_ref, - 1, False) + rescue_vbd_ref = VMHelper.create_vbd(self._session, rescue_vm_ref, + vdi_ref, 1, False) - self._session.call_xenapi("Async.VBD.plug", vbd_ref) + self._session.call_xenapi("Async.VBD.plug", rescue_vbd_ref) def unrescue(self, instance, callback): """Unrescue the specified instance From cdd8790426d3eb77712f5a19f99211b12a9ad9c5 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 17:48:44 -0600 Subject: [PATCH 65/76] Review feedback --- nova/virt/xenapi/vm_utils.py | 10 +++++----- nova/virt/xenapi/vmops.py | 6 +++--- nova/virt/xenapi/volume_utils.py | 6 +++--- 3 files changed, 11 insertions(+), 11 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 7aa3f3c3be0e..1a872345dbf9 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -202,13 +202,13 @@ class VMHelper(HelperBase): @classmethod def find_vbd_by_number(cls, session, vm_ref, number): """Get the VBD reference from the device number""" - vbds = session.get_xenapi().VM.get_VBDs(vm_ref) - if vbds: - for vbd in vbds: + vbd_refs = session.get_xenapi().VM.get_VBDs(vm_ref) + if vbd_refs: + for vbd_ref in vbd_refs: try: - vbd_rec = session.get_xenapi().VBD.get_record(vbd) + vbd_rec = session.get_xenapi().VBD.get_record(vbd_ref) if vbd_rec['userdevice'] == str(number): - return vbd + return vbd_ref except cls.XenAPI.Failure, exc: LOG.exception(exc) raise StorageError(_('VBD not found in instance %s') % vm_ref) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 0faec11695ed..382915b0cb3e 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -55,12 +55,12 @@ class VMOps(object): def list_instances(self): """List VM instances""" - vms = [] + vm_refs = [] for vm_ref in self._session.get_xenapi().VM.get_all(): vm_rec = self._session.get_xenapi().VM.get_record(vm_ref) if not vm_rec["is_a_template"] and not vm_rec["is_control_domain"]: - vms.append(vm_rec["name_label"]) - return vms + vm_refs.append(vm_rec["name_label"]) + return vm_refs def _start(self, instance, vm_ref=None): """Power on a VM instance""" diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index d5ebd29d54df..72284ac024d9 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -117,16 +117,16 @@ class VolumeHelper(HelperBase): def introduce_vdi(cls, session, sr_ref): """Introduce VDI in the host""" try: - vdis = 
session.get_xenapi().SR.get_VDIs(sr_ref) + vdi_refs = session.get_xenapi().SR.get_VDIs(sr_ref) except cls.XenAPI.Failure, exc: LOG.exception(exc) raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref) try: - vdi_rec = session.get_xenapi().VDI.get_record(vdis[0]) + vdi_rec = session.get_xenapi().VDI.get_record(vdi_refs[0]) except cls.XenAPI.Failure, exc: LOG.exception(exc) raise StorageError(_('Unable to get record' - ' of VDI %s on') % vdis[0]) + ' of VDI %s on') % vdi_refs[0]) else: try: return session.get_xenapi().VDI.introduce( From 48196abaf7e9c47bfda3f744e0be9bc242004b72 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 18:00:34 -0600 Subject: [PATCH 66/76] Review feedback --- nova/virt/xenapi/vm_utils.py | 56 ++++++++++++++++++------------------ nova/virt/xenapi/vmops.py | 8 +++--- 2 files changed, 32 insertions(+), 32 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 1a872345dbf9..4d55937e35be 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -443,29 +443,29 @@ class VMHelper(HelperBase): vdi_size += MBR_SIZE_BYTES name_label = get_name_label_for_image(image) - vdi = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) + vdi_ref = cls.create_vdi(session, sr_ref, name_label, vdi_size, False) - with_vdi_attached_here(session, vdi, False, + with_vdi_attached_here(session, vdi_ref, False, lambda dev: _stream_disk(dev, image_type, virtual_size, image_file)) if image_type == ImageType.KERNEL_RAMDISK: #we need to invoke a plugin for copying VDI's #content into proper path - LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi) + LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi_ref) fn = "copy_kernel_vdi" args = {} - args['vdi-ref'] = vdi + args['vdi-ref'] = vdi_ref #let the plugin copy the correct number of bytes args['image-size'] = str(vdi_size) task = session.async_call_plugin('glance', fn, args) filename = session.wait_for_task(task, instance_id) #remove the VDI as it is not needed anymore - session.get_xenapi().VDI.destroy(vdi) - LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi) + session.get_xenapi().VDI.destroy(vdi_ref) + LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi_ref) return filename else: - return session.get_xenapi().VDI.get_uuid(vdi) + return session.get_xenapi().VDI.get_uuid(vdi_ref) @classmethod def determine_disk_image_type(cls, instance): @@ -840,16 +840,16 @@ def safe_find_sr(session): def find_sr(session): """Return the storage repository to hold VM images""" host = session.get_xenapi_host() - srs = session.get_xenapi().SR.get_all() - for sr in srs: - sr_rec = session.get_xenapi().SR.get_record(sr) + sr_refs = session.get_xenapi().SR.get_all() + for sr_ref in sr_refs: + sr_rec = session.get_xenapi().SR.get_record(sr_ref) if not ('i18n-key' in sr_rec['other_config'] and sr_rec['other_config']['i18n-key'] == 'local-storage'): continue - for pbd in sr_rec['PBDs']: - pbd_rec = session.get_xenapi().PBD.get_record(pbd) + for pbd_ref in sr_rec['PBDs']: + pbd_rec = session.get_xenapi().PBD.get_record(pbd_ref) if pbd_rec['host'] == host: - return sr + return sr_ref return None @@ -874,11 +874,11 @@ def remap_vbd_dev(dev): return remapped_dev -def with_vdi_attached_here(session, vdi, read_only, f): +def with_vdi_attached_here(session, vdi_ref, read_only, f): this_vm_ref = get_this_vm_ref(session) vbd_rec = {} vbd_rec['VM'] = this_vm_ref - vbd_rec['VDI'] = vdi + vbd_rec['VDI'] = vdi_ref vbd_rec['userdevice'] = 'autodetect' vbd_rec['bootable'] = False 
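    # The next line uses the old "and/or" conditional idiom (a stand-in
    # for the ternary operator in pre-2.5 Python); since 'RO' is always
    # truthy, it behaves like:
    #
    #     vbd_rec['mode'] = 'RO' if read_only else 'RW'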
vbd_rec['mode'] = read_only and 'RO' or 'RW' @@ -889,14 +889,14 @@ def with_vdi_attached_here(session, vdi, read_only, f): vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] - LOG.debug(_('Creating VBD for VDI %s ... '), vdi) - vbd = session.get_xenapi().VBD.create(vbd_rec) - LOG.debug(_('Creating VBD for VDI %s done.'), vdi) + LOG.debug(_('Creating VBD for VDI %s ... '), vdi_ref) + vbd_ref = session.get_xenapi().VBD.create(vbd_rec) + LOG.debug(_('Creating VBD for VDI %s done.'), vdi_ref) try: - LOG.debug(_('Plugging VBD %s ... '), vbd) - session.get_xenapi().VBD.plug(vbd) - LOG.debug(_('Plugging VBD %s done.'), vbd) - orig_dev = session.get_xenapi().VBD.get_device(vbd) + LOG.debug(_('Plugging VBD %s ... '), vbd_ref) + session.get_xenapi().VBD.plug(vbd_ref) + LOG.debug(_('Plugging VBD %s done.'), vbd_ref) + orig_dev = session.get_xenapi().VBD.get_device(vbd_ref) LOG.debug(_('VBD %(vbd)s plugged as %(orig_dev)s') % locals()) dev = remap_vbd_dev(orig_dev) if dev != orig_dev: @@ -904,13 +904,13 @@ def with_vdi_attached_here(session, vdi, read_only, f): 'remapping to %(dev)s') % locals()) return f(dev) finally: - LOG.debug(_('Destroying VBD for VDI %s ... '), vdi) - vbd_unplug_with_retry(session, vbd) - ignore_failure(session.get_xenapi().VBD.destroy, vbd) - LOG.debug(_('Destroying VBD for VDI %s done.'), vdi) + LOG.debug(_('Destroying VBD for VDI %s ... '), vdi_ref) + vbd_unplug_with_retry(session, vbd_ref) + ignore_failure(session.get_xenapi().VBD.destroy, vbd_ref) + LOG.debug(_('Destroying VBD for VDI %s done.'), vdi_ref) -def vbd_unplug_with_retry(session, vbd): +def vbd_unplug_with_retry(session, vbd_ref): """Call VBD.unplug on the given VBD, with a retry if we get DEVICE_DETACH_REJECTED. 
For reasons which I don't understand, we're seeing the device still in use, even when all processes using the device @@ -918,7 +918,7 @@ def vbd_unplug_with_retry(session, vbd): # FIXME(sirp): We can use LoopingCall here w/o blocking sleep() while True: try: - session.get_xenapi().VBD.unplug(vbd) + session.get_xenapi().VBD.unplug(vbd_ref) LOG.debug(_('VBD.unplug successful first time.')) return except VMHelper.XenAPI.Failure, e: diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index 382915b0cb3e..fcb290d0362b 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -87,8 +87,8 @@ class VMOps(object): def _spawn_with_disk(self, instance, vdi_uuid): """Create VM instance""" instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is not None: + vm_ref = VMHelper.lookup(self._session, instance_name) + if vm_ref is not None: raise exception.Duplicate(_('Attempted to create' ' non-unique name %s') % instance_name) @@ -639,8 +639,8 @@ class VMOps(object): instance._rescue = False for vbd_ref in vbd_refs: - vbd = self._session.get_xenapi().VBD.get_record(vbd_ref) - if vbd["userdevice"] == "1": + _vbd_ref = self._session.get_xenapi().VBD.get_record(vbd_ref) + if _vbd_ref["userdevice"] == "1": VMHelper.unplug_vbd(self._session, vbd_ref) VMHelper.destroy_vbd(self._session, vbd_ref) From b944cbcbf023ca321edcc511354b56aa5b07d438 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Fri, 11 Mar 2011 18:03:19 -0600 Subject: [PATCH 67/76] oops --- nova/virt/xenapi/vm_utils.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 4d55937e35be..f07b57796f46 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -897,10 +897,10 @@ def with_vdi_attached_here(session, vdi_ref, read_only, f): session.get_xenapi().VBD.plug(vbd_ref) LOG.debug(_('Plugging VBD %s done.'), vbd_ref) orig_dev = session.get_xenapi().VBD.get_device(vbd_ref) - LOG.debug(_('VBD %(vbd)s plugged as %(orig_dev)s') % locals()) + LOG.debug(_('VBD %(vbd_ref)s plugged as %(orig_dev)s') % locals()) dev = remap_vbd_dev(orig_dev) if dev != orig_dev: - LOG.debug(_('VBD %(vbd)s plugged into wrong dev, ' + LOG.debug(_('VBD %(vbd_ref)s plugged into wrong dev, ' 'remapping to %(dev)s') % locals()) return f(dev) finally: From de1197cfee200782a5a1d07fb40138d4f103890e Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Sun, 13 Mar 2011 10:49:56 +0100 Subject: [PATCH 68/76] Fix instructions for setting up the initial database. 
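
nova-manage takes its arguments as '<category> <action>', so the hint
printed by _db_error() had the two words reversed. The working
invocation, to be run before any command that touches the database, is:

    $ nova-manage db sync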
---
 bin/nova-manage | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/bin/nova-manage b/bin/nova-manage
index a880a9c2fa80..cb4d18614406 100755
--- a/bin/nova-manage
+++ b/bin/nova-manage
@@ -276,7 +276,7 @@ def _db_error(caught_exception):
    print caught_exception
    print _("The above error may show that the database has not "
            "been created.\nPlease create a database using "
-            "nova-manage sync db before running this command.")
+            "'nova-manage db sync' before running this command.")
    exit(1)

From c8fc7ed48be84e3b39ab88c8c103fbe52b6718e1 Mon Sep 17 00:00:00 2001
From: Soren Hansen
Date: Mon, 14 Mar 2011 14:06:10 +0100
Subject: [PATCH 69/76] Add a unit test

---
 nova/network/linux_net.py | 17 +++++++++++------
 nova/tests/test_network.py | 27 +++++++++++++++++++++++++++
 2 files changed, 38 insertions(+), 6 deletions(-)

diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py
index 0bcc3608107b..f55662a7a207 100644
--- a/nova/network/linux_net.py
+++ b/nova/network/linux_net.py
@@ -18,7 +18,7 @@ Implements vlans, bridges, and iptables rules using linux utilities.
 """
 import os
-import time
+import calendar

 from nova import db
 from nova import exception
@@ -380,12 +380,17 @@ interface %s
 def _host_lease(fixed_ip_ref):
    """Return a host string for an address in leasefile format"""
    instance_ref = fixed_ip_ref['instance']
-    timestamp = time.mktime(instance_ref['updated_at'].timetuple())
+    if instance_ref['updated_at']:
+        timestamp = instance_ref['updated_at']
+    else:
+        timestamp = instance_ref['created_at']

-    return "%d %s %s %s" % (timestamp + FLAGS.dhcp_lease_time,
-                            instance_ref['mac_address'],
-                            instance_ref['hostname'],
-                            fixed_ip_ref['address'])
+    seconds_since_epoch = calendar.timegm(timestamp.utctimetuple())
+
+    return "%d %s %s %s *" % (seconds_since_epoch + FLAGS.dhcp_lease_time,
+                              instance_ref['mac_address'],
+                              fixed_ip_ref['address'],
+                              instance_ref['hostname'] or '*')


 def _host_dhcp(fixed_ip_ref):
diff --git a/nova/tests/test_network.py b/nova/tests/test_network.py
index ce1c77210898..b7a76be8564f 100644
--- a/nova/tests/test_network.py
+++ b/nova/tests/test_network.py
@@ -20,6 +20,7 @@ Unit Tests for network code
 """
 import IPy
 import os
+import time

 from nova import context
 from nova import db
@@ -29,6 +30,7 @@ from nova import log as logging
 from nova import test
 from nova import utils
 from nova.auth import manager
+from nova.network import linux_net

 FLAGS = flags.FLAGS
 LOG = logging.getLogger('nova.tests.network')
@@ -321,6 +323,31 @@ class NetworkTestCase(test.TestCase):
                                             network['id'])
        self.assertEqual(ip_count, num_available_ips)

+    def test_dhcp_lease_output(self):
+        admin_ctxt = context.get_admin_context()
+        address = self._create_address(0, self.instance_id)
+        lease_ip(address)
+        network_ref = db.network_get_by_instance(admin_ctxt, self.instance_id)
+        leases = linux_net.get_dhcp_leases(context.get_admin_context(),
+                                           network_ref['id'])
+        for line in leases.split('\n'):
+            seconds, mac, ip, hostname, client_id = line.split(' ')
+            self.assertTrue(int(seconds) > time.time(), 'Lease expires in '
+                                                        'the past')
+            octets = mac.split(':')
+            self.assertEqual(len(octets), 6, "Wrong number of octets "
+                                             "in %s" % (mac,))
+            for octet in octets:
+                self.assertEqual(len(octet), 2, "Oddly sized octet: %s"
+                                                % (octet,))
+                # This will throw an exception if the octet is invalid
+                int(octet, 16)
+
+            # And this will raise an exception in case of an invalid IP
+            IPy.IP(ip)
+
+        release_ip(address)
+

 def is_allocated_in_project(address, project_id):
    """Returns true if address is in
specified project""" From 62d7d521273e19d8e700ab301be38830576efa3b Mon Sep 17 00:00:00 2001 From: Ken Pepple Date: Mon, 14 Mar 2011 11:27:21 -0700 Subject: [PATCH 70/76] small typo in nova-manage vm live-migration --- bin/nova-manage | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/bin/nova-manage b/bin/nova-manage index 1eb4e5418f0e..2b42dfff5ce2 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -572,7 +572,7 @@ class VmCommands(object): """ ctxt = context.get_admin_context() - instance_id = ec2_id_to_id(ec2_id) + instance_id = ec2utils.ec2_id_to_id(ec2_id) if FLAGS.connection_type != 'libvirt': msg = _('Only KVM is supported for now. Sorry!') From f4e7da2d9f7d6793b383c5f187939f19ec849f0a Mon Sep 17 00:00:00 2001 From: Soren Hansen Date: Mon, 14 Mar 2011 21:10:11 +0100 Subject: [PATCH 71/76] Include cpuinfo.xml.template in tarball. --- MANIFEST.in | 1 + 1 file changed, 1 insertion(+) diff --git a/MANIFEST.in b/MANIFEST.in index 2ceed34f3f97..bf30d1546922 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -25,6 +25,7 @@ include nova/db/sqlalchemy/migrate_repo/migrate.cfg include nova/db/sqlalchemy/migrate_repo/README include nova/virt/interfaces.template include nova/virt/libvirt*.xml.template +include nova/virt/cpuinfo.xml.template include nova/tests/CA/ include nova/tests/CA/cacert.pem include nova/tests/CA/private/ From ce57cff740bc7f821bdbb0dd1367c037b6fa1c01 Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 14 Mar 2011 14:02:27 -0700 Subject: [PATCH 73/76] The exception is called "ApiError", not "APIError" --- nova/virt/libvirt_conn.py | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 0b306c950e3e..7994e95473f4 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -362,19 +362,19 @@ class LibvirtConnection(object): @exception.wrap_exception def pause(self, instance, callback): - raise exception.APIError("pause not supported for libvirt.") + raise exception.ApiError("pause not supported for libvirt.") @exception.wrap_exception def unpause(self, instance, callback): - raise exception.APIError("unpause not supported for libvirt.") + raise exception.ApiError("unpause not supported for libvirt.") @exception.wrap_exception def suspend(self, instance, callback): - raise exception.APIError("suspend not supported for libvirt") + raise exception.ApiError("suspend not supported for libvirt") @exception.wrap_exception def resume(self, instance, callback): - raise exception.APIError("resume not supported for libvirt") + raise exception.ApiError("resume not supported for libvirt") @exception.wrap_exception def rescue(self, instance, callback=None): @@ -779,7 +779,7 @@ class LibvirtConnection(object): 'cpu_time': cpu_time} def get_diagnostics(self, instance_name): - raise exception.APIError(_("diagnostics are not supported " + raise exception.ApiError(_("diagnostics are not supported " "for libvirt")) def get_disks(self, instance_name): From 408a2591d60f5d238e60e4be9197ccc7262f2406 Mon Sep 17 00:00:00 2001 From: Josh Kearney Date: Mon, 14 Mar 2011 16:21:33 -0500 Subject: [PATCH 74/76] PEP8 cleanup --- nova/tests/db/fakes.py | 4 ++-- nova/tests/test_xenapi.py | 11 +++++------ nova/virt/xenapi/vm_utils.py | 3 +-- 3 files changed, 8 insertions(+), 10 deletions(-) diff --git a/nova/tests/db/fakes.py b/nova/tests/db/fakes.py index 142f6b1c624c..5e9a3aa3b833 100644 --- a/nova/tests/db/fakes.py +++ b/nova/tests/db/fakes.py @@ -77,8 +77,8 @@ def 
stub_out_db_instance_api(stubs): 'mac_address': values['mac_address'], 'vcpus': type_data['vcpus'], 'local_gb': type_data['local_gb'], - 'os_type': values['os_type'] - } + 'os_type': values['os_type']} + return FakeModel(base_options) def fake_network_get_by_instance(context, instance_id): diff --git a/nova/tests/test_xenapi.py b/nova/tests/test_xenapi.py index cd125a3019a0..8b0affd5cb39 100644 --- a/nova/tests/test_xenapi.py +++ b/nova/tests/test_xenapi.py @@ -78,8 +78,7 @@ class XenAPIVolumeTestCase(test.TestCase): 'ramdisk_id': 3, 'instance_type': 'm1.large', 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'os_type': 'linux' - } + 'os_type': 'linux'} def _create_volume(self, size='0'): """Create a volume object.""" @@ -315,8 +314,8 @@ class XenAPIVMTestCase(test.TestCase): 'ramdisk_id': ramdisk_id, 'instance_type': instance_type, 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'os_type': os_type - } + 'os_type': os_type} + conn = xenapi_conn.get_connection(False) instance = db.instance_create(values) conn.spawn(instance) @@ -440,8 +439,8 @@ class XenAPIMigrateInstance(test.TestCase): 'ramdisk_id': None, 'instance_type': 'm1.large', 'mac_address': 'aa:bb:cc:dd:ee:ff', - 'os_type': 'linux' - } + 'os_type': 'linux'} + stubs.stub_out_migration_methods(self.stubs) glance_stubs.stubout_glance_client(self.stubs, glance_stubs.FakeGlance) diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index f07b57796f46..763c5fe40311 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -136,8 +136,7 @@ class VMHelper(HelperBase): 'VCPUs_at_startup': vcpus, 'VCPUs_max': vcpus, 'VCPUs_params': {}, - 'xenstore_data': {} - } + 'xenstore_data': {}} # Complete VM configuration record according to the image type # non-raw/raw with PV kernel/raw in HVM mode From 7c6aa5eb302637dde0d800f7155235858bbabbeb Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 14 Mar 2011 14:32:15 -0700 Subject: [PATCH 75/76] Removed duplicated test, renamed same-named (but non-identical) tests --- nova/tests/test_compute.py | 15 ++------------- 1 file changed, 2 insertions(+), 13 deletions(-) diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py index e486050be56a..14559d1dc666 100644 --- a/nova/tests/test_compute.py +++ b/nova/tests/test_compute.py @@ -115,7 +115,7 @@ class ComputeTestCase(test.TestCase): finally: db.instance_destroy(self.context, ref[0]['id']) - def test_create_instance_associates_security_groups(self): + def test_create_instance_associates_security_groups_1(self): """Make sure create associates security groups""" group = self._create_group() instance_ref = models.Instance() @@ -124,18 +124,7 @@ class ComputeTestCase(test.TestCase): instance_ref['hostname'] = 'i-00000001' return instance_ref - def test_create_instance_defaults_display_name(self): - """Verify that an instance cannot be created without a display_name.""" - cases = [dict(), dict(display_name=None)] - for instance in cases: - ref = self.compute_api.create(self.context, - FLAGS.default_instance_type, None, **instance) - try: - self.assertNotEqual(ref[0]['display_name'], None) - finally: - db.instance_destroy(self.context, ref[0]['id']) - - def test_create_instance_associates_security_groups(self): + def test_create_instance_associates_security_groups_2(self): """Make sure create associates security groups""" group = self._create_group() ref = self.compute_api.create( From 567e3bc3a7e66896482d83420190a7c4a8df1e5a Mon Sep 17 00:00:00 2001 From: Justin Santa Barbara Date: Mon, 14 Mar 2011 15:17:00 -0700 
Subject: [PATCH 76/76] The first of those tests doesn't pass; remove it, as it looks like it was meant to be deleted.

---
 nova/tests/test_compute.py | 11 +----------
 1 file changed, 1 insertion(+), 10 deletions(-)

diff --git a/nova/tests/test_compute.py b/nova/tests/test_compute.py
index 14559d1dc666..3651f4cefcfc 100644
--- a/nova/tests/test_compute.py
+++ b/nova/tests/test_compute.py
@@ -115,16 +115,7 @@ class ComputeTestCase(test.TestCase):
        finally:
            db.instance_destroy(self.context, ref[0]['id'])

-    def test_create_instance_associates_security_groups_1(self):
-        """Make sure create associates security groups"""
-        group = self._create_group()
-        instance_ref = models.Instance()
-        instance_ref['id'] = 1
-        instance_ref['volumes'] = [{'id': 1}, {'id': 2}]
-        instance_ref['hostname'] = 'i-00000001'
-        return instance_ref
-
-    def test_create_instance_associates_security_groups_2(self):
+    def test_create_instance_associates_security_groups(self):
        """Make sure create associates security groups"""
        group = self._create_group()
        ref = self.compute_api.create(