diff --git a/.bzrignore b/.bzrignore index d81a7d829df7..b271561a3564 100644 --- a/.bzrignore +++ b/.bzrignore @@ -12,3 +12,4 @@ CA/openssl.cnf CA/serial* CA/newcerts/*.pem CA/private/cakey.pem +nova/vcsversion.py diff --git a/.mailmap b/.mailmap index 010678569a24..d13219ab03f2 100644 --- a/.mailmap +++ b/.mailmap @@ -16,6 +16,8 @@ + + Masumoto @@ -30,3 +32,4 @@ + diff --git a/Authors b/Authors index 639e68a59e10..82e07a6b58ef 100644 --- a/Authors +++ b/Authors @@ -3,8 +3,9 @@ Anne Gentle Anthony Young Antony Messerli Armando Migliaccio -Chris Behrens +Chiradeep Vittal Chmouel Boudjnah +Chris Behrens Cory Wright David Pravec Dean Troyer @@ -13,18 +14,29 @@ Ed Leafe Eldar Nugaev Eric Day Ewan Mellor +Hisaharu Ishii Hisaki Ohara +Ilya Alekseyev Jay Pipes Jesse Andrews Joe Heck Joel Moore Jonathan Bryce +Josh Durgin Josh Kearney Joshua McKenty Justin Santa Barbara +Kei Masumoto +Ken Pepple +Koji Iida +Lorin Hochstein Matt Dietz Michael Gundlach +Monsyne Dragon Monty Taylor +MORITA Kazutaka +Muneyuki Noguchi +Nachi Ueno Paul Voccio Rick Clark Rick Harris @@ -39,4 +51,3 @@ Trey Morris Vishvananda Ishaya Youcef Laribi Zhixue Wu - diff --git a/MANIFEST.in b/MANIFEST.in index 199ce30b6d01..07e4dd516a1d 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -12,6 +12,7 @@ include nova/cloudpipe/bootscript.sh include nova/cloudpipe/client.ovpn.template include nova/compute/fakevirtinstance.xml include nova/compute/interfaces.template +include nova/db/sqlalchemy/migrate_repo/migrate.cfg include nova/virt/interfaces.template include nova/virt/libvirt*.xml.template include nova/tests/CA/ diff --git a/README b/README index 851bca9dbfd5..f9334c295d02 100644 --- a/README +++ b/README @@ -1,7 +1,7 @@ The Choose Your Own Adventure README for Nova: You have come across a cloud computing fabric controller. It has identified - itself as "Nova." It is apparent that it maintains compatability with + itself as "Nova." It is apparent that it maintains compatibility with the popular Amazon EC2 and S3 APIs. To monitor it from a distance: follow @novacc on twitter @@ -10,7 +10,7 @@ To tame it for use in your own cloud: read http://nova.openstack.org/getting.sta To study its anatomy: read http://nova.openstack.org/architecture.html -To disect it in detail: visit http://code.launchpad.net/nova +To dissect it in detail: visit http://code.launchpad.net/nova To taunt it with its weaknesses: use http://bugs.launchpad.net/nova diff --git a/babel.cfg b/babel.cfg new file mode 100644 index 000000000000..15cd6cb76b93 --- /dev/null +++ b/babel.cfg @@ -0,0 +1,2 @@ +[python: **.py] + diff --git a/bin/nova-ajax-console-proxy b/bin/nova-ajax-console-proxy new file mode 100755 index 000000000000..2bc4076584af --- /dev/null +++ b/bin/nova-ajax-console-proxy @@ -0,0 +1,137 @@ +#!/usr/bin/env python +# pylint: disable-msg=C0103 +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +"""Ajax Console Proxy Server""" + +from eventlet import greenthread +from eventlet.green import urllib2 + +import exceptions +import gettext +import logging +import os +import sys +import time +import urlparse + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + sys.path.insert(0, possible_topdir) + +gettext.install('nova', unicode=1) + +from nova import flags +from nova import log as logging +from nova import rpc +from nova import utils +from nova import wsgi + +FLAGS = flags.FLAGS + +flags.DEFINE_integer('ajax_console_idle_timeout', 300, + 'Seconds before idle connection destroyed') + +LOG = logging.getLogger('nova.ajax_console_proxy') +LOG.setLevel(logging.DEBUG) +LOG.addHandler(logging.StreamHandler()) + + +class AjaxConsoleProxy(object): + tokens = {} + + def __call__(self, env, start_response): + try: + req_url = '%s://%s%s?%s' % (env['wsgi.url_scheme'], + env['HTTP_HOST'], + env['PATH_INFO'], + env['QUERY_STRING']) + if 'HTTP_REFERER' in env: + auth_url = env['HTTP_REFERER'] + else: + auth_url = req_url + + auth_params = urlparse.parse_qs(urlparse.urlparse(auth_url).query) + parsed_url = urlparse.urlparse(req_url) + + auth_info = AjaxConsoleProxy.tokens[auth_params['token'][0]] + args = auth_info['args'] + auth_info['last_activity'] = time.time() + + remote_url = ("http://%s:%s%s?token=%s" % ( + str(args['host']), + str(args['port']), + parsed_url.path, + str(args['token']))) + + opener = urllib2.urlopen(remote_url, env['wsgi.input'].read()) + body = opener.read() + info = opener.info() + + start_response("200 OK", info.dict.items()) + return body + except (exceptions.KeyError): + if env['PATH_INFO'] != '/favicon.ico': + LOG.audit("Unauthorized request %s, %s" + % (req_url, str(env))) + start_response("401 NOT AUTHORIZED", []) + return "Not Authorized" + except Exception: + start_response("500 ERROR", []) + return "Server Error" + + def register_listeners(self): + class Callback: + def __call__(self, data, message): + if data['method'] == 'authorize_ajax_console': + AjaxConsoleProxy.tokens[data['args']['token']] = \ + {'args': data['args'], 'last_activity': time.time()} + + conn = rpc.Connection.instance(new=True) + consumer = rpc.TopicConsumer( + connection=conn, + topic=FLAGS.ajax_console_proxy_topic) + consumer.register_callback(Callback()) + + def delete_expired_tokens(): + now = time.time() + to_delete = [] + for k, v in AjaxConsoleProxy.tokens.items(): + if now - v['last_activity'] > FLAGS.ajax_console_idle_timeout: + to_delete.append(k) + + for k in to_delete: + del AjaxConsoleProxy.tokens[k] + + utils.LoopingCall(consumer.fetch, auto_ack=True, + enable_callbacks=True).start(0.1) + utils.LoopingCall(delete_expired_tokens).start(1) + +if __name__ == '__main__': + utils.default_flagfile() + FLAGS(sys.argv) + server = wsgi.Server() + acp = AjaxConsoleProxy() + acp.register_listeners() + server.start(acp, FLAGS.ajax_console_proxy_port, host='0.0.0.0') + server.wait() diff --git a/bin/nova-api b/bin/nova-api index 1c671201eb58..7b4fbeab1402 100755 --- a/bin/nova-api +++ b/bin/nova-api @@ -34,23 +34,53 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): 
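A note on the AjaxConsoleProxy just added: it keeps authorized tokens in a class-level dict and prunes idle entries from a LoopingCall. A minimal standalone sketch of that expiry bookkeeping, with the timeout and structure assumed from the script above::

    import time

    IDLE_TIMEOUT = 300  # mirrors the ajax_console_idle_timeout flag
    tokens = {}  # token -> {'args': ..., 'last_activity': timestamp}

    def authorize(token, args):
        # what the rpc callback records for each authorize_ajax_console
        tokens[token] = {'args': args, 'last_activity': time.time()}

    def delete_expired_tokens():
        # collect expired keys first so the dict is not mutated mid-scan
        now = time.time()
        expired = [k for k, v in tokens.items()
                   if now - v['last_activity'] > IDLE_TIMEOUT]
        for k in expired:
            del tokens[k]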
gettext.install('nova', unicode=1) -from nova import api from nova import flags -from nova import utils +from nova import log as logging from nova import wsgi +logging.basicConfig() +LOG = logging.getLogger('nova.api') +LOG.setLevel(logging.DEBUG) FLAGS = flags.FLAGS -flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port') -flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host') -flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port') -flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host') + +API_ENDPOINTS = ['ec2', 'osapi'] + + +def run_app(paste_config_file): + LOG.debug(_("Using paste.deploy config at: %s"), paste_config_file) + apps = [] + for api in API_ENDPOINTS: + config = wsgi.load_paste_configuration(paste_config_file, api) + if config is None: + LOG.debug(_("No paste configuration for app: %s"), api) + continue + LOG.debug(_("App Config: %s\n%r"), api, config) + wsgi.paste_config_to_flags(config, { + "verbose": FLAGS.verbose, + "%s_host" % api: config.get('host', '0.0.0.0'), + "%s_port" % api: getattr(FLAGS, "%s_port" % api)}) + LOG.info(_("Running %s API"), api) + app = wsgi.load_paste_app(paste_config_file, api) + apps.append((app, getattr(FLAGS, "%s_port" % api), + getattr(FLAGS, "%s_host" % api))) + if len(apps) == 0: + LOG.error(_("No known API applications configured in %s."), + paste_config_file) + return + + # NOTE(todd): redo logging config, verbose could be set in paste config + logging.basicConfig() + server = wsgi.Server() + for app in apps: + server.start(*app) + server.wait() if __name__ == '__main__': - utils.default_flagfile() FLAGS(sys.argv) - server = wsgi.Server() - server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host) - server.start(api.API('ec2'), FLAGS.ec2api_port, host=FLAGS.ec2api_host) - server.wait() + conf = wsgi.paste_config_file('nova-api.conf') + if conf: + run_app(conf) + else: + LOG.error(_("No paste configuration found for: %s"), 'nova-api.conf') diff --git a/bin/nova-api-paste b/bin/nova-api-paste deleted file mode 100755 index 6ee833a18b96..000000000000 --- a/bin/nova-api-paste +++ /dev/null @@ -1,109 +0,0 @@ -#!/usr/bin/env python -# pylint: disable-msg=C0103 -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -"""Starter script for Nova API.""" - -import gettext -import logging -import os -import sys - -from paste import deploy - -# If ../nova/__init__.py exists, add ../ to Python search path, so that -# it will override what happens to be installed in /usr/(local/)lib/python... 
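The wsgi.load_paste_configuration()/load_paste_app() calls in the new bin/nova-api wrap the same paste.deploy machinery that the deleted bin/nova-api-paste here invokes directly. A rough sketch of that underlying pattern, with a hypothetical config path::

    from paste import deploy

    # appconfig() returns the settings of one named app section;
    # loadapp() instantiates that section as a WSGI application.
    conf_uri = 'config:/etc/nova/nova-api.conf'
    config = deploy.appconfig(conf_uri, name='ec2')
    port = int(config.get('ec2_port', 8773))
    app = deploy.loadapp(conf_uri, name='ec2')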
-possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), - os.pardir, - os.pardir)) -if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): - sys.path.insert(0, possible_topdir) - -gettext.install('nova', unicode=1) - -from nova import flags -from nova import wsgi - -LOG = logging.getLogger('nova.api') -LOG.setLevel(logging.DEBUG) -LOG.addHandler(logging.StreamHandler()) - -FLAGS = flags.FLAGS - -API_ENDPOINTS = ['ec2', 'openstack'] - - -def load_configuration(paste_config): - """Load the paste configuration from the config file and return it.""" - config = None - # Try each known name to get the global DEFAULTS, which will give ports - for name in API_ENDPOINTS: - try: - config = deploy.appconfig("config:%s" % paste_config, name=name) - except LookupError: - pass - if config: - verbose = config.get('verbose', None) - if verbose: - FLAGS.verbose = int(verbose) == 1 - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - return config - LOG.debug(_("Paste config at %s has no secion for known apis"), - paste_config) - print _("Paste config at %s has no secion for any known apis") % \ - paste_config - os.exit(1) - - -def launch_api(paste_config_file, section, server, port, host): - """Launch an api server from the specified port and IP.""" - LOG.debug(_("Launching %s api on %s:%s"), section, host, port) - app = deploy.loadapp('config:%s' % paste_config_file, name=section) - server.start(app, int(port), host) - - -def run_app(paste_config_file): - LOG.debug(_("Using paste.deploy config at: %s"), configfile) - config = load_configuration(paste_config_file) - LOG.debug(_("Configuration: %r"), config) - server = wsgi.Server() - ip = config.get('host', '0.0.0.0') - for api in API_ENDPOINTS: - port = config.get("%s_port" % api, None) - if not port: - continue - host = config.get("%s_host" % api, ip) - launch_api(configfile, api, server, port, host) - LOG.debug(_("All api servers launched, now waiting")) - server.wait() - - -if __name__ == '__main__': - FLAGS(sys.argv) - configfiles = ['/etc/nova/nova-api.conf'] - if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): - configfiles.insert(0, - os.path.join(possible_topdir, 'etc', 'nova-api.conf')) - for configfile in configfiles: - if os.path.exists(configfile): - run_app(configfile) - break - else: - LOG.debug(_("Skipping missing configuration: %s"), configfile) diff --git a/bin/nova-combined b/bin/nova-combined index 53322f1a0828..913c866bf28e 100755 --- a/bin/nova-combined +++ b/bin/nova-combined @@ -36,23 +36,20 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) -from nova import api from nova import flags +from nova import log as logging from nova import service from nova import utils from nova import wsgi FLAGS = flags.FLAGS -flags.DEFINE_integer('osapi_port', 8774, 'OpenStack API port') -flags.DEFINE_string('osapi_host', '0.0.0.0', 'OpenStack API host') -flags.DEFINE_integer('ec2api_port', 8773, 'EC2 API port') -flags.DEFINE_string('ec2api_host', '0.0.0.0', 'EC2 API host') if __name__ == '__main__': utils.default_flagfile() FLAGS(sys.argv) + logging.basicConfig() compute = service.Service.create(binary='nova-compute') network = service.Service.create(binary='nova-network') @@ -62,7 +59,22 @@ if __name__ == '__main__': service.serve(compute, network, volume, scheduler) - server = wsgi.Server() - server.start(api.API('os'), FLAGS.osapi_port, host=FLAGS.osapi_host) - server.start(api.API('ec2'), FLAGS.ec2api_port, 
host=FLAGS.ec2api_host) - server.wait() + apps = [] + paste_config_file = wsgi.paste_config_file('nova-api.conf') + for api in ['osapi', 'ec2']: + config = wsgi.load_paste_configuration(paste_config_file, api) + if config is None: + continue + wsgi.paste_config_to_flags(config, { + "verbose": FLAGS.verbose, + "%s_host" % api: config.get('host', '0.0.0.0'), + "%s_port" % api: getattr(FLAGS, "%s_port" % api)}) + app = wsgi.load_paste_app(paste_config_file, api) + apps.append((app, getattr(FLAGS, "%s_port" % api), + getattr(FLAGS, "%s_host" % api))) + if len(apps) > 0: + logging.basicConfig() + server = wsgi.Server() + for app in apps: + server.start(*app) + server.wait() diff --git a/bin/nova-console b/bin/nova-console new file mode 100755 index 000000000000..802cc80b608e --- /dev/null +++ b/bin/nova-console @@ -0,0 +1,44 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Starter script for Nova Console Proxy.""" + +import eventlet +eventlet.monkey_patch() + +import gettext +import os +import sys + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... +possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + sys.path.insert(0, possible_topdir) + +gettext.install('nova', unicode=1) + +from nova import service +from nova import utils + +if __name__ == '__main__': + utils.default_flagfile() + service.serve() + service.wait() diff --git a/bin/nova-dhcpbridge b/bin/nova-dhcpbridge index 828aba3d1bac..1a994d956311 100755 --- a/bin/nova-dhcpbridge +++ b/bin/nova-dhcpbridge @@ -22,7 +22,6 @@ Handle lease database updates from DHCP servers. 
""" import gettext -import logging import os import sys @@ -39,6 +38,7 @@ gettext.install('nova', unicode=1) from nova import context from nova import db from nova import flags +from nova import log as logging from nova import rpc from nova import utils from nova.network import linux_net @@ -49,11 +49,13 @@ flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('update_dhcp_on_disassociate', 'nova.network.manager') +LOG = logging.getLogger('nova.dhcpbridge') + def add_lease(mac, ip_address, _hostname, _interface): """Set the IP that was assigned by the DHCP server.""" if FLAGS.fake_rabbit: - logging.debug("leasing ip") + LOG.debug(_("leasing ip")) network_manager = utils.import_object(FLAGS.network_manager) network_manager.lease_fixed_ip(context.get_admin_context(), mac, @@ -68,14 +70,14 @@ def add_lease(mac, ip_address, _hostname, _interface): def old_lease(mac, ip_address, hostname, interface): """Update just as add lease.""" - logging.debug("Adopted old lease or got a change of mac/hostname") + LOG.debug(_("Adopted old lease or got a change of mac/hostname")) add_lease(mac, ip_address, hostname, interface) def del_lease(mac, ip_address, _hostname, _interface): """Called when a lease expires.""" if FLAGS.fake_rabbit: - logging.debug("releasing ip") + LOG.debug(_("releasing ip")) network_manager = utils.import_object(FLAGS.network_manager) network_manager.release_fixed_ip(context.get_admin_context(), mac, @@ -100,6 +102,7 @@ def main(): flagfile = os.environ.get('FLAGFILE', FLAGS.dhcpbridge_flagfile) utils.default_flagfile(flagfile) argv = FLAGS(sys.argv) + logging.basicConfig() interface = os.environ.get('DNSMASQ_INTERFACE', 'br0') if int(os.environ.get('TESTING', '0')): FLAGS.fake_rabbit = True @@ -117,9 +120,9 @@ def main(): mac = argv[2] ip = argv[3] hostname = argv[4] - logging.debug("Called %s for mac %s with ip %s and " - "hostname %s on interface %s", - action, mac, ip, hostname, interface) + LOG.debug(_("Called %s for mac %s with ip %s and " + "hostname %s on interface %s"), + action, mac, ip, hostname, interface) globals()[action + '_lease'](mac, ip, hostname, interface) else: print init_leases(interface) diff --git a/bin/nova-direct-api b/bin/nova-direct-api new file mode 100755 index 000000000000..e7dd14fb27b8 --- /dev/null +++ b/bin/nova-direct-api @@ -0,0 +1,61 @@ +#!/usr/bin/env python +# pylint: disable-msg=C0103 +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""Starter script for Nova Direct API.""" + +import gettext +import os +import sys + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + sys.path.insert(0, possible_topdir) + +gettext.install('nova', unicode=1) + +from nova import flags +from nova import utils +from nova import wsgi +from nova.api import direct +from nova.compute import api as compute_api + + +FLAGS = flags.FLAGS +flags.DEFINE_integer('direct_port', 8001, 'Direct API port') +flags.DEFINE_string('direct_host', '0.0.0.0', 'Direct API host') + +if __name__ == '__main__': + utils.default_flagfile() + FLAGS(sys.argv) + + direct.register_service('compute', compute_api.ComputeAPI()) + direct.register_service('reflect', direct.Reflection()) + router = direct.Router() + with_json = direct.JsonParamsMiddleware(router) + with_req = direct.PostParamsMiddleware(with_json) + with_auth = direct.DelegatedAuthMiddleware(with_req) + + server = wsgi.Server() + server.start(with_auth, FLAGS.direct_port, host=FLAGS.direct_host) + server.wait() diff --git a/bin/nova-instancemonitor b/bin/nova-instancemonitor index 5dac3ffe608d..7dca0201451c 100755 --- a/bin/nova-instancemonitor +++ b/bin/nova-instancemonitor @@ -23,7 +23,6 @@ import gettext import os -import logging import sys from twisted.application import service @@ -37,19 +36,23 @@ if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): gettext.install('nova', unicode=1) +from nova import log as logging from nova import utils from nova import twistd from nova.compute import monitor +# TODO(todd): shouldn't this be done with flags? And what about verbose? logging.getLogger('boto').setLevel(logging.WARN) +LOG = logging.getLogger('nova.instancemonitor') + if __name__ == '__main__': utils.default_flagfile() twistd.serve(__file__) if __name__ == '__builtin__': - logging.warn('Starting instance monitor') + LOG.warn(_('Starting instance monitor')) # pylint: disable-msg=C0103 monitor = monitor.InstanceMonitor() diff --git a/bin/nova-logspool b/bin/nova-logspool new file mode 100644 index 000000000000..097459b12339 --- /dev/null +++ b/bin/nova-logspool @@ -0,0 +1,156 @@ +#!/usr/bin/env python + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
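The nova-logspool tool being added here scans nova logs for ERROR records and groups them by request id; the id is pulled from each line with a regex (see handle_logged_error below). A small illustration of that extraction against an invented sample line::

    import re

    line = ('Jan  4 12:00:01 host1 nova-api (nova.api): '
            '[DEMO-REQ-1/ADMIN] AttributeError raised')
    match = re.search(r' \[([A-Z0-9\-/]+)', line)
    if match:
        # 'DEMO-REQ-1/ADMIN' becomes the spool file name for this error
        print match.group(1)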
+ +""" +Tools for working with logs generated by nova components +""" + + +import json +import os +import re +import sys + + +class Request(object): + def __init__(self): + self.time = "" + self.host = "" + self.logger = "" + self.message = "" + self.trace = "" + self.env = "" + self.request_id = "" + + def add_error_line(self, error_line): + self.time = " ".join(error_line.split(" ")[:3]) + self.host = error_line.split(" ")[3] + self.logger = error_line.split("(")[1].split(" ")[0] + self.request_id = error_line.split("[")[1].split(" ")[0] + error_lines = error_line.split("#012") + self.message = self.clean_log_line(error_lines.pop(0)) + self.trace = "\n".join([self.clean_trace(l) for l in error_lines]) + + def add_environment_line(self, env_line): + self.env = self.clean_env_line(env_line) + + def clean_log_line(self, line): + """Remove log format for time, level, etc: split after context""" + return line.split('] ')[-1] + + def clean_env_line(self, line): + """Also has an 'Environment: ' string in the message""" + return re.sub(r'^Environment: ', '', self.clean_log_line(line)) + + def clean_trace(self, line): + """trace has a different format, so split on TRACE:""" + return line.split('TRACE: ')[-1] + + def to_dict(self): + return {'traceback': self.trace, 'message': self.message, + 'host': self.host, 'env': self.env, 'logger': self.logger, + 'request_id': self.request_id} + + +class LogReader(object): + def __init__(self, filename): + self.filename = filename + self._errors = {} + + def process(self, spooldir): + with open(self.filename) as f: + line = f.readline() + while len(line) > 0: + parts = line.split(" ") + level = (len(parts) < 6) or parts[5] + if level == 'ERROR': + self.handle_logged_error(line) + elif level == '[-]' and self.last_error: + # twisted stack trace line + clean_line = " ".join(line.split(" ")[6:]) + self.last_error.trace = self.last_error.trace + clean_line + else: + self.last_error = None + line = f.readline() + self.update_spool(spooldir) + + def handle_logged_error(self, line): + request_id = re.search(r' \[([A-Z0-9\-/]+)', line) + if not request_id: + raise Exception("Unable to parse request id from %s" % line) + request_id = request_id.group(1) + data = self._errors.get(request_id, Request()) + if self.is_env_line(line): + data.add_environment_line(line) + elif self.is_error_line(line): + data.add_error_line(line) + else: + # possibly error from twsited + data.add_error_line(line) + self.last_error = data + self._errors[request_id] = data + + def is_env_line(self, line): + return re.search('Environment: ', line) + + def is_error_line(self, line): + return re.search('raised', line) + + def update_spool(self, directory): + processed_dir = "%s/processed" % directory + self._ensure_dir_exists(processed_dir) + for rid, value in self._errors.iteritems(): + if not self.has_been_processed(processed_dir, rid): + with open("%s/%s" % (directory, rid), "w") as spool: + spool.write(json.dumps(value.to_dict())) + self.flush_old_processed_spool(processed_dir) + + def _ensure_dir_exists(self, d): + mkdir = False + try: + os.stat(d) + except: + mkdir = True + if mkdir: + os.mkdir(d) + + def has_been_processed(self, processed_dir, rid): + rv = False + try: + os.stat("%s/%s" % (processed_dir, rid)) + rv = True + except: + pass + return rv + + def flush_old_processed_spool(self, processed_dir): + keys = self._errors.keys() + procs = os.listdir(processed_dir) + for p in procs: + if p not in keys: + # log has rotated and the old error won't be seen again + os.unlink("%s/%s" % 
(processed_dir, p)) + +if __name__ == '__main__': + filename = '/var/log/nova.log' + spooldir = '/var/spool/nova' + if len(sys.argv) > 1: + filename = sys.argv[1] + if len(sys.argv) > 2: + spooldir = sys.argv[2] + LogReader(filename).process(spooldir) diff --git a/bin/nova-manage b/bin/nova-manage index 3416c1a52ee1..d0901ddfcaac 100755 --- a/bin/nova-manage +++ b/bin/nova-manage @@ -55,8 +55,8 @@ import datetime import gettext -import logging import os +import re import sys import time @@ -77,18 +77,22 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import quota from nova import utils from nova.auth import manager from nova.cloudpipe import pipelib +from nova.db import migration +logging.basicConfig() FLAGS = flags.FLAGS flags.DECLARE('fixed_range', 'nova.network.manager') flags.DECLARE('num_networks', 'nova.network.manager') flags.DECLARE('network_size', 'nova.network.manager') flags.DECLARE('vlan_start', 'nova.network.manager') flags.DECLARE('vpn_start', 'nova.network.manager') +flags.DECLARE('fixed_range_v6', 'nova.network.manager') class VpnCommands(object): @@ -333,6 +337,11 @@ class ProjectCommands(object): arguments: name project_manager [description]""" self.manager.create_project(name, project_manager, description) + def modify(self, name, project_manager, description=None): + """Modifies a project + arguments: name project_manager [description]""" + self.manager.modify_project(name, project_manager, description) + def delete(self, name): """Deletes an existing project arguments: name""" @@ -432,11 +441,12 @@ class NetworkCommands(object): """Class for managing networks.""" def create(self, fixed_range=None, num_networks=None, - network_size=None, vlan_start=None, vpn_start=None): + network_size=None, vlan_start=None, vpn_start=None, + fixed_range_v6=None): """Creates fixed ips for host by range arguments: [fixed_range=FLAG], [num_networks=FLAG], [network_size=FLAG], [vlan_start=FLAG], - [vpn_start=FLAG]""" + [vpn_start=FLAG], [fixed_range_v6=FLAG]""" if not fixed_range: fixed_range = FLAGS.fixed_range if not num_networks: @@ -447,11 +457,13 @@ class NetworkCommands(object): vlan_start = FLAGS.vlan_start if not vpn_start: vpn_start = FLAGS.vpn_start + if not fixed_range_v6: + fixed_range_v6 = FLAGS.fixed_range_v6 net_manager = utils.import_object(FLAGS.network_manager) net_manager.create_networks(context.get_admin_context(), fixed_range, int(num_networks), int(network_size), int(vlan_start), - int(vpn_start)) + int(vpn_start), fixed_range_v6) class ServiceCommands(object): @@ -499,6 +511,30 @@ class ServiceCommands(object): db.service_update(ctxt, svc['id'], {'disabled': True}) +class LogCommands(object): + def request(self, request_id, logfile='/var/log/nova.log'): + """Show all fields in the log for the given request. Assumes you + haven't changed the log format too much. 
+ ARGS: request_id [logfile]""" + lines = utils.execute("cat %s | grep '\[%s '" % (logfile, request_id)) + print re.sub('#012', "\n", "\n".join(lines)) + + +class DbCommands(object): + """Class for managing the database.""" + + def __init__(self): + pass + + def sync(self, version=None): + """Sync the database up to the most recent version.""" + return migration.db_sync(version) + + def version(self): + """Print the current database version.""" + print migration.db_version() + + CATEGORIES = [ ('user', UserCommands), ('project', ProjectCommands), @@ -507,7 +543,9 @@ CATEGORIES = [ ('vpn', VpnCommands), ('floating', FloatingIpCommands), ('network', NetworkCommands), - ('service', ServiceCommands)] + ('service', ServiceCommands), + ('log', LogCommands), + ('db', DbCommands)] def lazy_match(name, key_value_tuples): @@ -546,9 +584,6 @@ def main(): utils.default_flagfile() argv = FLAGS(sys.argv) - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - script_name = argv.pop(0) if len(argv) < 1: print script_name + " category action []" diff --git a/bin/nova-spoolsentry b/bin/nova-spoolsentry new file mode 100644 index 000000000000..ab20268a9948 --- /dev/null +++ b/bin/nova-spoolsentry @@ -0,0 +1,97 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
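nova-spoolsentry, whose body follows, packs each spooled error the way its Sentry endpoint expects: pickle, then zlib-compress, then base64. A standalone sketch of just that encoding round-trip (Python 2 idioms, record invented)::

    import base64
    try:
        import cPickle as pickle
    except ImportError:
        import pickle

    record = {'message': 'something raised', 'logger': 'nova.api'}
    # pickle -> zlib -> base64, as in SpoolSentry.send_data() below
    encoded = base64.b64encode(pickle.dumps(record).encode('zlib'))
    # the receiving server reverses the steps
    decoded = pickle.loads(base64.b64decode(encoded).decode('zlib'))
    assert decoded == record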
+ + +import base64 +import json +import logging +import os +import shutil +import sys +import urllib +import urllib2 +try: + import cPickle as pickle +except: + import pickle + + +class SpoolSentry(object): + def __init__(self, spool_dir, sentry_url, key=None): + self.spool_dir = spool_dir + self.sentry_url = sentry_url + self.key = key + + def process(self): + for fname in os.listdir(self.spool_dir): + if fname == "processed": + continue + try: + sourcefile = "%s/%s" % (self.spool_dir, fname) + with open(sourcefile) as f: + fdata = f.read() + data_from_json = json.loads(fdata) + data = self.build_data(data_from_json) + self.send_data(data) + destfile = "%s/processed/%s" % (self.spool_dir, fname) + shutil.move(sourcefile, destfile) + except: + logging.exception("Unable to upload record %s", fname) + raise + + def build_data(self, filejson): + env = {'SERVER_NAME': 'unknown', 'SERVER_PORT': '0000', + 'SCRIPT_NAME': '/unknown/', 'PATH_INFO': 'unknown'} + if filejson['env']: + env = json.loads(filejson['env']) + url = "http://%s:%s%s%s" % (env['SERVER_NAME'], env['SERVER_PORT'], + env['SCRIPT_NAME'], env['PATH_INFO']) + rv = {'logger': filejson['logger'], 'level': logging.ERROR, + 'server_name': filejson['host'], 'url': url, + 'message': filejson['message'], + 'traceback': filejson['traceback']} + rv['data'] = {} + if filejson['env']: + rv['data']['META'] = env + if filejson['request_id']: + rv['data']['request_id'] = filejson['request_id'] + return rv + + def send_data(self, data): + data = { + 'data': base64.b64encode(pickle.dumps(data).encode('zlib')), + 'key': self.key + } + req = urllib2.Request(self.sentry_url) + res = urllib2.urlopen(req, urllib.urlencode(data)) + if res.getcode() != 200: + raise Exception("Bad HTTP code: %s" % res.getcode()) + txt = res.read() + +if __name__ == '__main__': + sentryurl = 'http://127.0.0.1/sentry/store/' + key = '' + spooldir = '/var/spool/nova' + if len(sys.argv) > 1: + sentryurl = sys.argv[1] + if len(sys.argv) > 2: + key = sys.argv[2] + if len(sys.argv) > 3: + spooldir = sys.argv[3] + SpoolSentry(spooldir, sentryurl, key).process() diff --git a/bin/stack b/bin/stack new file mode 100755 index 000000000000..7a6ce5960d1c --- /dev/null +++ b/bin/stack @@ -0,0 +1,145 @@ +#!/usr/bin/env python +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""CLI for the Direct API.""" + +import eventlet +eventlet.monkey_patch() + +import os +import pprint +import sys +import textwrap +import urllib +import urllib2 + +# If ../nova/__init__.py exists, add ../ to Python search path, so that +# it will override what happens to be installed in /usr/(local/)lib/python... 
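The stack client defined below drives the Direct API with plain form-encoded POSTs, carrying identity in two X-OpenStack-* headers. A hedged sketch of one such call; the host, user, and project values are placeholders, and the reflect/get_controllers method is the one the client's own help path uses::

    import urllib
    import urllib2

    url = 'http://127.0.0.1:8001/reflect/get_controllers'
    headers = {'X-OpenStack-User': 'user1',
               'X-OpenStack-Project': 'proj1'}
    # parameters, when present, travel form-encoded in the POST body
    req = urllib2.Request(url, urllib.urlencode({}), headers)
    print urllib2.urlopen(req).read()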
+possible_topdir = os.path.normpath(os.path.join(os.path.abspath(sys.argv[0]), + os.pardir, + os.pardir)) +if os.path.exists(os.path.join(possible_topdir, 'nova', '__init__.py')): + sys.path.insert(0, possible_topdir) + +import gflags +from nova import utils + + +FLAGS = gflags.FLAGS +gflags.DEFINE_string('host', '127.0.0.1', 'Direct API host') +gflags.DEFINE_integer('port', 8001, 'Direct API host') +gflags.DEFINE_string('user', 'user1', 'Direct API username') +gflags.DEFINE_string('project', 'proj1', 'Direct API project') + + +USAGE = """usage: stack [options] [arg1=value arg2=value] + + `stack help` should output the list of available controllers + `stack ` should output the available methods for that controller + `stack help ` should do the same + `stack help ` should output info for a method +""" + + +def format_help(d): + """Format help text, keys are labels and values are descriptions.""" + indent = max([len(k) for k in d]) + out = [] + for k, v in d.iteritems(): + t = textwrap.TextWrapper(initial_indent=' %s ' % k.ljust(indent), + subsequent_indent=' ' * (indent + 6)) + out.extend(t.wrap(v)) + return out + + +def help_all(): + rv = do_request('reflect', 'get_controllers') + out = format_help(rv) + return (USAGE + str(FLAGS.MainModuleHelp()) + + '\n\nAvailable controllers:\n' + + '\n'.join(out) + '\n') + + +def help_controller(controller): + rv = do_request('reflect', 'get_methods') + methods = dict([(k.split('/')[2], v) for k, v in rv.iteritems() + if k.startswith('/%s' % controller)]) + return ('Available methods for %s:\n' % controller + + '\n'.join(format_help(methods))) + + +def help_method(controller, method): + rv = do_request('reflect', + 'get_method_info', + {'method': '/%s/%s' % (controller, method)}) + + sig = '%s(%s):' % (method, ', '.join(['='.join(x) for x in rv['args']])) + out = textwrap.wrap(sig, subsequent_indent=' ' * len('%s(' % method)) + out.append('\n' + rv['doc']) + return '\n'.join(out) + + +def do_request(controller, method, params=None): + if params: + data = urllib.urlencode(params) + else: + data = None + + url = 'http://%s:%s/%s/%s' % (FLAGS.host, FLAGS.port, controller, method) + headers = {'X-OpenStack-User': FLAGS.user, + 'X-OpenStack-Project': FLAGS.project} + + req = urllib2.Request(url, data, headers) + resp = urllib2.urlopen(req) + return utils.loads(resp.read()) + + +if __name__ == '__main__': + args = FLAGS(sys.argv) + + cmd = args.pop(0) + if not args: + print help_all() + sys.exit() + + first = args.pop(0) + if first == 'help': + action = help_all + params = [] + if args: + params.append(args.pop(0)) + action = help_controller + if args: + params.append(args.pop(0)) + action = help_method + print action(*params) + sys.exit(0) + + controller = first + if not args: + print help_controller(controller) + sys.exit() + + method = args.pop(0) + params = {} + for x in args: + key, value = x.split('=', 1) + params[key] = value + + pprint.pprint(do_request(controller, method, params)) diff --git a/contrib/boto_v6/__init__.py b/contrib/boto_v6/__init__.py new file mode 100644 index 000000000000..9fec157f178d --- /dev/null +++ b/contrib/boto_v6/__init__.py @@ -0,0 +1,37 @@ +# Copyright (c) 2006-2010 Mitch Garnaat http://garnaat.org/ +# Copyright (c) 2010, Eucalyptus Systems, Inc. +# All rights reserved. 
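The boto_v6 package being added here exposes connect_ec2() (defined just below), which returns the IPv6-aware EC2ConnectionV6 from the files that follow. A usage sketch, assuming contrib/ is on your PYTHONPATH and with invented credentials::

    import boto_v6

    conn = boto_v6.connect_ec2(aws_access_key_id='...',
                               aws_secret_access_key='...')
    # DescribeInstancesV6 responses parse dnsNameV6 into dns_name_v6
    for reservation in conn.get_all_instances():
        for inst in reservation.instances:
            print inst.id, inst.dns_name_v6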
+# +# Permission is hereby granted, free of charge, to any person obtaining a +# copy of this software and associated documentation files (the +# "Software"), to deal in the Software without restriction, including +# without limitation the rights to use, copy, modify, merge, publish, dis- +# tribute, sublicense, and/or sell copies of the Software, and to permit +# persons to whom the Software is furnished to do so, subject to the fol- +# lowing conditions: +# +# The above copyright notice and this permission notice shall be included +# in all copies or substantial portions of the Software. +# +# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS +# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL- +# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT +# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, +# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS +# IN THE SOFTWARE. + + +def connect_ec2(aws_access_key_id=None, aws_secret_access_key=None, **kwargs): + """ + :type aws_access_key_id: string + :param aws_access_key_id: Your AWS Access Key ID + + :type aws_secret_access_key: string + :param aws_secret_access_key: Your AWS Secret Access Key + + :rtype: :class:`boto.ec2.connection.EC2Connection` + :return: A connection to Amazon's EC2 + """ + from boto_v6.ec2.connection import EC2ConnectionV6 + return EC2ConnectionV6(aws_access_key_id, aws_secret_access_key, **kwargs) diff --git a/contrib/boto_v6/ec2/__init__.py b/contrib/boto_v6/ec2/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/contrib/boto_v6/ec2/connection.py b/contrib/boto_v6/ec2/connection.py new file mode 100644 index 000000000000..23466e5d70c8 --- /dev/null +++ b/contrib/boto_v6/ec2/connection.py @@ -0,0 +1,41 @@ +''' +Created on 2010/12/20 + +@author: Nachi Ueno +''' +import boto +import boto.ec2 +from boto_v6.ec2.instance import ReservationV6 + + +class EC2ConnectionV6(boto.ec2.EC2Connection): + ''' + EC2Connection for OpenStack IPV6 mode + ''' + def get_all_instances(self, instance_ids=None, filters=None): + """ + Retrieve all the instances associated with your account. + + :type instance_ids: list + :param instance_ids: A list of strings of instance IDs + + :type filters: dict + :param filters: Optional filters that can be used to limit + the results returned. Filters are provided + in the form of a dictionary consisting of + filter names as the key and filter values + as the value. The set of allowable filter + names/values is dependent on the request + being performed. Check the EC2 API guide + for details. 
+ + :rtype: list + :return: A list of :class:`boto.ec2.instance.Reservation` + """ + params = {} + if instance_ids: + self.build_list_params(params, instance_ids, 'InstanceId') + if filters: + self.build_filter_params(params, filters) + return self.get_list('DescribeInstancesV6', params, + [('item', ReservationV6)]) diff --git a/contrib/boto_v6/ec2/instance.py b/contrib/boto_v6/ec2/instance.py new file mode 100644 index 000000000000..18b5cd33ad35 --- /dev/null +++ b/contrib/boto_v6/ec2/instance.py @@ -0,0 +1,37 @@ +''' +Created on 2010/12/20 + +@author: Nachi Ueno +''' +import boto +from boto.resultset import ResultSet +from boto.ec2.instance import Reservation +from boto.ec2.instance import Group +from boto.ec2.instance import Instance + + +class ReservationV6(Reservation): + def startElement(self, name, attrs, connection): + if name == 'instancesSet': + self.instances = ResultSet([('item', InstanceV6)]) + return self.instances + elif name == 'groupSet': + self.groups = ResultSet([('item', Group)]) + return self.groups + else: + return None + + +class InstanceV6(Instance): + def __init__(self, connection=None): + Instance.__init__(self, connection) + self.dns_name_v6 = None + + def endElement(self, name, value, connection): + Instance.endElement(self, name, value, connection) + if name == 'dnsNameV6': + self.dns_name_v6 = value + + def _update(self, updated): + self.__dict__.update(updated.__dict__) + self.dns_name_v6 = updated.dns_name_v6 diff --git a/contrib/nova.sh b/contrib/nova.sh index da1ba030c3f1..a0e8e642c116 100755 --- a/contrib/nova.sh +++ b/contrib/nova.sh @@ -78,13 +78,22 @@ if [ "$CMD" == "install" ]; then sudo apt-get install -y user-mode-linux kvm libvirt-bin sudo apt-get install -y screen euca2ools vlan curl rabbitmq-server sudo apt-get install -y lvm2 iscsitarget open-iscsi + sudo apt-get install -y socat echo "ISCSITARGET_ENABLE=true" | sudo tee /etc/default/iscsitarget sudo /etc/init.d/iscsitarget restart sudo modprobe kvm sudo /etc/init.d/libvirt-bin restart + sudo modprobe nbd sudo apt-get install -y python-twisted python-sqlalchemy python-mox python-greenlet python-carrot - sudo apt-get install -y python-daemon python-eventlet python-gflags python-tornado python-ipy - sudo apt-get install -y python-libvirt python-libxml2 python-routes + sudo apt-get install -y python-daemon python-eventlet python-gflags python-ipy + sudo apt-get install -y python-libvirt python-libxml2 python-routes python-cheetah +#For IPV6 + sudo apt-get install -y python-netaddr + sudo apt-get install -y radvd +#(Nati) Note that this configuration is only needed for nova-network node. + sudo bash -c "echo 1 > /proc/sys/net/ipv6/conf/all/forwarding" + sudo bash -c "echo 0 > /proc/sys/net/ipv6/conf/all/accept_ra" + if [ "$USE_MYSQL" == 1 ]; then cat < ### This will be the IP network that ALL the projects for future VM guests will reside on. E.g. 192.168.0.0/12 + +--network_size=<# of addrs> ### This is the total number of IP Addrs to use for VM guests, of all projects. E.g. 5000 + +The following code can be cut and paste, and edited to your setup: Note: CC_ADDR= -Nova development has consolidated all .conf files to nova.conf as of November 2010. References to specific .conf files may be ignored. - -#. 
These need to be defined in the nova.conf configuration file:: - - --sql_connection=mysql://root:nova@$CC_ADDR/nova # location of nova sql db - --s3_host=$CC_ADDR # This is where Nova is hosting the objectstore service, which - # will contain the VM images and buckets - --rabbit_host=$CC_ADDR # This is where the rabbit AMQP messaging service is hosted - --cc_host=$CC_ADDR # This is where the the nova-api service lives - --verbose # Optional but very helpful during initial setup - --ec2_url=http://$CC_ADDR:8773/services/Cloud - --network_manager=nova.network.manager.FlatManager # simple, no-vlan networking type - --fixed_range= # ip network to use for VM guests, ex 192.168.2.64/26 - --network_size=<# of addrs> # number of ip addrs to use for VM guests, ex 64 - -#. Create a nova group:: - - sudo addgroup nova - -The Nova config file should have its owner set to root:nova, and mode set to 0640, since they contain your MySQL server's root password. - +Detailed explanation of the following example is available above. + :: + +--sql_connection=mysql://root:nova@/nova +--s3_host= +--rabbit_host= +--cc_host= +--verbose +--ec2_url=http://:8773/services/Cloud +--network_manager=nova.network.manager.VlanManager +--fixed_range= +--network_size=<# of addrs> + +2. Create a “nova” group, and set permissions:: - cd /etc/nova - chown -R root:nova . + addgroup nova + +The Nova config file should have its owner set to root:nova, and mode set to 0644, since they contain your MySQL server's root password. :: -Step 3 Setup the sql db ------------------------ + chown -R root:nova /etc/nova + chmod 644 /etc/nova/nova.conf + +Step 3 - Setup the SQL DB (MySQL for this setup) +------------------------------------------------ + +1. First you 'preseed' to bypass all the installation prompts:: -1. First you 'preseed' (using the Quick Start method :doc:`../quickstart`). Run this as root. + bash + MYSQL_PASS=nova + cat < - /usr/bin/python /usr/bin/nova-manage project create - /usr/bin/python /usr/bin/nova-manage project create network - -Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table.o. - -On running this command, entries are made in the 'networks' and 'fixed_ips' table. However, one of the networks listed in the 'networks' table needs to be marked as bridge in order for the code to know that a bridge exists. The Network is marked as bridged automatically based on the type of network manager selected. - -More networking details to create a network bridge for flat network -^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - -Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. In my case, I wanted to keep things as simple as possible and have all the vm guests on the same network as the vm hosts (the compute nodes). Thus, I set the compute node's external IP address to be on the bridge and added eth0 to that bridge. To do this, edit your network interfaces config to look like the following:: + sed -i 's/127.0.0.1/0.0.0.0/g' /etc/mysql/my.cnf + service mysql restart + +3. 
Network Configuration + +If you use FlatManager (as opposed to the VlanManager we set) as your network manager, there are some additional networking changes you’ll have to make to ensure connectivity between your nodes and VMs. If you chose VlanManager or FlatDHCP, you may skip this section, as it’s set up for you automatically. + +Nova defaults to a bridge device named 'br100'. This needs to be created and somehow integrated into YOUR network. To keep things as simple as possible, have all the VM guests on the same network as the VM hosts (the compute nodes). To do so, set the compute node's external IP address to be on the bridge and add eth0 to that bridge. To do this, edit your network interfaces config to look like the following:: < begin /etc/network/interfaces > # The loopback network interface auto lo iface lo inet loopback - + # Networking for NOVA auto br100 - + iface br100 inet dhcp bridge_ports eth0 bridge_stp off bridge_maxwait 0 bridge_fd 0 < end /etc/network/interfaces > - - + Next, restart networking to apply the changes:: + + sudo /etc/init.d/networking restart - sudo /etc/init.d/networking restart +4. MySQL DB configuration: + +Create NOVA database:: -Step 5: Create nova certs. --------------------------- + mysql -uroot -p$MYSQL_PASS -e 'CREATE DATABASE nova;' + +Update the DB to include user 'root'@'%' with super user privileges:: -Generate the certs as a zip file:: + mysql -uroot -p$MYSQL_PASS -e "GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' WITH GRANT OPTION;" + +Set the MySQL root password:: - mkdir creds - sudo /usr/bin/python /usr/bin/nova-manage project zip admin admin creds/nova.zip + mysql -uroot -p$MYSQL_PASS -e "SET PASSWORD FOR 'root'@'%' = PASSWORD('$MYSQL_PASS');" + +Step 4 - Setup Nova environment +------------------------------- -you can get the rc file more easily with:: +These are the commands you run to set up a user and project:: - sudo /usr/bin/python /usr/bin/nova-manage project env admin admin creds/novarc + /usr/bin/python /usr/bin/nova-manage user admin + /usr/bin/python /usr/bin/nova-manage project create + /usr/bin/python /usr/bin/nova-manage network create + +Here is an example of what this looks like with real data:: -unzip them in your home directory, and add them to your environment:: + /usr/bin/python /usr/bin/nova-manage user admin dub + /usr/bin/python /usr/bin/nova-manage project create dubproject dub + /usr/bin/python /usr/bin/nova-manage network create 192.168.0.0/24 1 255 + +(I chose a /24 since that falls inside my /12 range I set in ‘fixed-range’ in nova.conf. Currently, there can only be one network, and I am using the max IPs available in a /24. You can choose to use any valid amount that you would like.) + +Note: The nova-manage service assumes that the first IP address is your network (like 192.168.0.0), that the 2nd IP is your gateway (192.168.0.1), and that the broadcast is the very last IP in the range you defined (192.168.0.255). If this is not the case you will need to manually edit the sql db 'networks' table. + +On running this command, entries are made in the 'networks' and 'fixed_ips' tables. However, one of the networks listed in the 'networks' table needs to be marked as bridged in order for the code to know that a bridge exists. The network is marked as bridged automatically based on the type of network manager selected. This is ONLY necessary if you chose FlatManager as your network type. More information can be found at the end of this document discussing setting up the bridge device.
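The network/gateway/broadcast assumptions noted above are easy to sanity-check for your own range with the python-netaddr package that contrib/nova.sh installs; a quick sketch::

    from netaddr import IPNetwork

    net = IPNetwork('192.168.0.0/24')
    # nova-manage assumes: first address = network, second = gateway,
    # last = broadcast
    print net[0]   # 192.168.0.0
    print net[1]   # 192.168.0.1
    print net[-1]  # 192.168.0.255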
+ + +Step 5 - Create Nova certificates +----------------------------------- + +1. Generate the certs as a zip file. These are the certs you will use to launch instances, bundle images, and all the other assorted API functions. - unzip creds/nova.zip - echo ". creds/novarc" >> ~/.bashrc - ~/.bashrc +:: -Step 6 Restart all relevant services ------------------------------------- + mkdir -p /root/creds + /usr/bin/python /usr/bin/nova-manage project zipfile $NOVA_PROJECT $NOVA_PROJECT_USER /root/creds/novacreds.zip + +2. Unzip them in your home directory, and add them to your environment. -Restart Libvirt:: +:: - sudo /etc/init.d/libvirt-bin restart + unzip /root/creds/novacreds.zip -d /root/creds/ + cat /root/creds/novarc >> ~/.bashrc + source ~/.bashrc -Restart relevant nova services:: +Step 6 - Restart all relevant services +-------------------------------------- - sudo /etc/init.d/nova-compute restart - sudo /etc/init.d/nova-volume restart +Restart all six services in total, just to cover the entire spectrum:: + + libvirtd restart; service nova-network restart; service nova-compute restart; service nova-api restart; service nova-objectstore restart; service nova-scheduler restart +Step 7 - Closing steps, and cleaning up +--------------------------------------- -.. todo:: do we still need the content below? +One of the most commonly missed configuration areas is not allowing the proper access to VMs. Use the 'euca-authorize' command to enable access. Below, you will find the commands to allow 'ping' and 'ssh' to your VMs:: -Bare-metal Provisioning Notes ----------------------------- + euca-authorize -P icmp -t -1:-1 default + euca-authorize -P tcp -p 22 default -To install the base operating system you can use PXE booting. +Another common issue is that you cannot ping or SSH to your instances after issuing the 'euca-authorize' commands. Something to look at is the number of 'dnsmasq' processes that are running. If you have a running instance, check to see that TWO 'dnsmasq' processes are running. If not, perform the following:: -Types of Hosts -------------- + killall dnsmasq + service nova-network restart -A single machine in your cluster can act as one or more of the following types -of host: +Step 8 - Testing the installation +--------------------------------- -Nova Services +You can then use `euca2ools` to test some items:: -* Network -* Compute -* Volume -* API -* Objectstore + euca-describe-images + euca-describe-instances + +If you have issues with the API key, you may need to re-source your creds file:: -Other supporting services + . /root/creds/novarc + +If you don’t get any immediate errors, you’re successfully making calls to your cloud! -* Message Queue -* Database (optional) -* Authentication database (optional) +Step 9 - Spinning up a VM for testing +------------------------------------- -Initial Setup ------------- +(This excerpt is from Thierry Carrez's blog, with reference to http://wiki.openstack.org/GettingImages.) -* Networking -* Cloudadmin User Creation +The image that you will use here will be a ttylinux image, so this is a limited function server. You will be able to ping and SSH to this instance, but it is in no way a full production VM.
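Everything euca2ools does in these steps is a plain EC2 API call, so it can also be scripted. For example, the two euca-authorize rules above translate roughly to this boto sketch; the credentials and endpoint are placeholders that would normally come from your novarc::

    import boto
    from boto.ec2.regioninfo import RegionInfo

    region = RegionInfo(name='nova', endpoint='127.0.0.1')
    conn = boto.connect_ec2(aws_access_key_id='...',
                            aws_secret_access_key='...',
                            is_secure=False, region=region,
                            port=8773, path='/services/Cloud')
    # mirror: euca-authorize -P icmp -t -1:-1 default
    conn.authorize_security_group('default', ip_protocol='icmp',
                                  from_port=-1, to_port=-1,
                                  cidr_ip='0.0.0.0/0')
    # mirror: euca-authorize -P tcp -p 22 default
    conn.authorize_security_group('default', ip_protocol='tcp',
                                  from_port=22, to_port=22,
                                  cidr_ip='0.0.0.0/0')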
-Deployment Technologies ----------------------- +Download the image, and publish to your bucket: -Once you have machines with a base operating system installation, you can deploy -code and configuration with your favorite tools to specify which machines in -your cluster have which roles: +:: + + image="ttylinux-uec-amd64-12.1_2.6.35-22_1.tar.gz" + wget http://smoser.brickies.net/ubuntu/ttylinux-uec/$image + uec-publish-tarball $image mybucket + +This will output three references, an "emi", an "eri" and an "eki" (image, ramdisk, and kernel). The emi is the one we use to launch instances, so take note of this. + +Create a keypair to SSH to the server: + +:: + + euca-add-keypair mykey > mykey.priv + + chmod 0600 mykey.priv + +Boot your instance: + +:: + + euca-run-instances $emi -k mykey -t m1.tiny + +($emi is replaced with the output from the previous command) + +Checking status, and confirming communication: + +Once you have booted the instance, you can check the status with the `euca-describe-instances` command. Here you can view the instance ID, IP, and current status of the VM. + +:: + + euca-describe-instances + +Once in a "running" state, you can use your SSH key to connect: + +:: + + ssh -i mykey.priv root@$ipaddress + +When you are ready to terminate the instance, you may do so with the `euca-terminate-instances` command: + +:: + + euca-terminate-instances $instance-id + +You can determine the instance-id with `euca-describe-instances`, and the format is "i-" with a series of letters and numbers following: e.g. i-a4g9d. + +For more information on creating your own custom (production-ready) instance images, please visit http://wiki.openstack.org/GettingImages! + +Enjoy your new private cloud, and play responsibly! -* Puppet -* Chef diff --git a/doc/source/adminguide/network.flat.rst b/doc/source/adminguide/network.flat.rst index 1b8661a40382..3d8680c6fdec 100644 --- a/doc/source/adminguide/network.flat.rst +++ b/doc/source/adminguide/network.flat.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/adminguide/network.vlan.rst b/doc/source/adminguide/network.vlan.rst index c6c4e7f91ea3..c06ce8e8bd89 100644 --- a/doc/source/adminguide/network.vlan.rst +++ b/doc/source/adminguide/network.vlan.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. @@ -91,11 +91,10 @@ These do NOT have IP addresses in the host to protect host access. Compute nodes have iptables/ebtables entries created per project and instance to protect against IP/MAC address spoofing and ARP poisoning. -The network assignment to a project, and IP address assignment to a VM instance, are triggered when a user starts to run a VM instance. When running a VM instance, a user needs to specify a project for the instances, and the security groups (described in Security Groups) when the instance wants to join.
diff --git a/doc/source/adminguide/network.flat.rst b/doc/source/adminguide/network.flat.rst index 1b8661a40382..3d8680c6fdec 100644 --- a/doc/source/adminguide/network.flat.rst +++ b/doc/source/adminguide/network.flat.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/adminguide/network.vlan.rst b/doc/source/adminguide/network.vlan.rst index c6c4e7f91ea3..c06ce8e8bd89 100644 --- a/doc/source/adminguide/network.vlan.rst +++ b/doc/source/adminguide/network.vlan.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. @@ -91,11 +91,10 @@ These do NOT have IP addresses in the host to protect host access. Compute nodes have iptables/ebtables entries created per project and instance to protect against IP/MAC address spoofing and ARP poisoning. -The network assignment to a project, and IP address assignment to a VM instance, are triggered when a user starts to run a VM instance. When running a VM instance, a user needs to specify a project for the instances, and the security groups (described in Security Groups) when the instance wants to join. If this is the first instance to be created for the project, then Nova (the cloud controller) needs to find a network controller to be the network host for the project; it then sets up a private network by finding an unused VLAN id, an unused subnet, and then the controller assigns them to the project, it also assigns a name to the project's Linux bridge, and allocating a private IP within the project's subnet for the new instance. +The network assignment to a project, and IP address assignment to a VM instance, are triggered when a user starts to run a VM instance. When running a VM instance, a user needs to specify a project for the instance, and the security groups (described in Security Groups) that the instance should join. If this is the first instance to be created for the project, then Nova (the cloud controller) needs to find a network controller to be the network host for the project; it then sets up a private network by finding an unused VLAN id and an unused subnet, assigns them to the project, assigns a name to the project's Linux bridge (br100, stored in the Nova database), and allocates a private IP within the project's subnet for the new instance. If the instance the user wants to start is not the project's first, a subnet and a VLAN must have already been assigned to the project; the system then needs only to find an available IP address within the subnet and assign it to the new instance. If there is no private IP available within the subnet, an exception is raised to the cloud controller, and the VM creation cannot proceed. -.. todo:: insert the name of the Linux bridge, is it always named bridge? External Infrastructure -----------------------
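The allocation flow just described boils down to a small decision procedure. Here is a rough, hypothetical Python sketch of that logic only; the class and names are invented for illustration and are not Nova's actual code::

    # Hypothetical illustration of the per-project allocation logic
    # described in network.vlan.rst above -- not Nova's real code.
    class NoMoreAddresses(Exception):
        """No free private IP is left in the project's subnet."""

    class ProjectNetwork(object):
        def __init__(self, vlan, bridge, addresses):
            self.vlan = vlan        # an unused VLAN id, e.g. 100
            self.bridge = bridge    # e.g. 'br100', stored in the database
            self.free = list(addresses)

        def next_free_ip(self):
            if not self.free:
                # Raised to the cloud controller; VM creation stops.
                raise NoMoreAddresses(self.bridge)
            return self.free.pop(0)

    networks = {}  # project_id -> ProjectNetwork; stands in for the DB

    def allocate_ip(project_id):
        if project_id not in networks:
            # First instance for the project: claim a VLAN, a subnet,
            # and a bridge name for it.
            networks[project_id] = ProjectNetwork(
                100, 'br100', ['10.0.0.3', '10.0.0.4', '10.0.0.5'])
        # Later instances only need a free IP from the existing subnet.
        return networks[project_id].next_free_ip()

    print allocate_ip('myproject')  # 10.0.0.3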
diff --git a/doc/source/adminguide/nova.manage.rst b/doc/source/adminguide/nova.manage.rst index 0e5c4e06249e..0ec67c69cece 100644 --- a/doc/source/adminguide/nova.manage.rst +++ b/doc/source/adminguide/nova.manage.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/cloud101.rst b/doc/source/cloud101.rst index 7c79d2a70f2d..9902ba502876 100644 --- a/doc/source/cloud101.rst +++ b/doc/source/cloud101.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. @@ -54,7 +54,7 @@ Cloud computing offers different service models depending on the capabilities a The US-based National Institute of Standards and Technology offers definitions for cloud computing and the service models that are emerging. -These definitions are summarized from http://csrc.nist.gov/groups/SNS/cloud-computing/. +These definitions are summarized from the `U.S. National Institute of Standards and Technology (NIST) cloud computing research group `_. SaaS - Software as a Service ^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -74,7 +74,6 @@ IaaS - Infrastructure as a Service Provides infrastructure such as computer instances, network connections, and storage so that people can run any software or operating system. - Types of Cloud Deployments -------------------------- @@ -87,4 +86,5 @@ A hybrid cloud can be a deployment model, as a composition of both public and pr Work in the Clouds ------------------ -.. todo:: What people have done/sample projects +What have people done with cloud computing? Cloud computing can help with large-scale computing needs, or it can drive consolidation efforts by virtualizing servers to make better use of existing hardware (and possibly release old hardware from service). People also use cloud computing for collaboration because of the high availability it provides through networked computers. Productivity suites for word processing, number crunching, email, and more are also available through cloud computing. Cloud computing also gives the cloud user additional storage, avoiding the need for extra hard drives on the desktop and enabling access to large data storage capacity online in the cloud. + diff --git a/doc/source/community.rst b/doc/source/community.rst index bfb93414ce1e..4ae32f1eb910 100644 --- a/doc/source/community.rst +++ b/doc/source/community.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. @@ -35,7 +35,8 @@ Contributing Code To contribute code, sign up for a Launchpad account and sign a contributor license agreement, available on the `OpenStack Wiki `_. Once the CLA is signed you -can contribute code through the Bazaar version control system which is related to your Launchpad account. +can contribute code through the Bazaar version control system, which is linked to your Launchpad +account. See the :doc:`devref/development.environment` page to get started. #openstack on Freenode IRC Network ---------------------------------- diff --git a/doc/source/conf.py b/doc/source/conf.py index 8f1b370ccbab..996dfb0a7ff5 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -60,10 +60,12 @@ copyright = u'2010, United States Government as represented by the Administrator # |version| and |release|, also used in various other places throughout the # built documents. # -# The short X.Y version. -version = '2011.1' +from nova import version as nova_version +#import nova.version # The full version, including alpha/beta/rc tags. -release = '2011.1-prerelease' +release = nova_version.version_string() +# The short X.Y version. +version = nova_version.canonical_version_string() # The language for content autogenerated by Sphinx. Refer to documentation # for a list of supported languages. diff --git a/doc/source/devref/addmethod.openstackapi.rst b/doc/source/devref/addmethod.openstackapi.rst index 4baa46e20568..dde50083beff 100644 --- a/doc/source/devref/addmethod.openstackapi.rst +++ b/doc/source/devref/addmethod.openstackapi.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 OpenStack LLC + Copyright 2010-2011 OpenStack LLC All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may diff --git a/doc/source/devref/api.rst b/doc/source/devref/api.rst index 14181529a976..35abf1ae05ac 100644 --- a/doc/source/devref/api.rst +++ b/doc/source/devref/api.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/architecture.rst b/doc/source/devref/architecture.rst index 1e23e13617c8..233cd6f08add 100644 --- a/doc/source/devref/architecture.rst +++ b/doc/source/devref/architecture.rst @@ -1,5 +1,5 @@ ..
- Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/auth.rst b/doc/source/devref/auth.rst index c3af3f945e45..830caba67332 100644 --- a/doc/source/devref/auth.rst +++ b/doc/source/devref/auth.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/cloudpipe.rst b/doc/source/devref/cloudpipe.rst index fb104c160e52..4f5d91e2881d 100644 --- a/doc/source/devref/cloudpipe.rst +++ b/doc/source/devref/cloudpipe.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/compute.rst b/doc/source/devref/compute.rst index db9ef6f3424b..31cc2037fa90 100644 --- a/doc/source/devref/compute.rst +++ b/doc/source/devref/compute.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/database.rst b/doc/source/devref/database.rst index 14559aa8c3ab..a26e4870570f 100644 --- a/doc/source/devref/database.rst +++ b/doc/source/devref/database.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. @@ -60,4 +60,4 @@ Tests ----- Tests are lacking for the db api layer and for the sqlalchemy driver. -Failures in the drivers would be dectected in other test cases, though. +Failures in the drivers would be detected in other test cases, though. diff --git a/doc/source/devref/development.environment.rst b/doc/source/devref/development.environment.rst index 6344c5382ced..f3c454d646fa 100644 --- a/doc/source/devref/development.environment.rst +++ b/doc/source/devref/development.environment.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. @@ -88,7 +88,12 @@ Here's how to get the latest code:: source .nova_venv/bin/activate ./run_tests.sh -And then you can do cleaning work or hack hack hack with a branched named cleaning:: +Then you can do cleaning work or hack hack hack with a branch named cleaning. + +Contributing Your Work +---------------------- + +Once your work is complete, you may wish to contribute it to the project. Add your name and email address to the `Authors` file, and also to the `.mailmap` file if you use multiple email addresses. Your contributions cannot be merged into trunk unless you are listed in the Authors file.
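Since merges are blocked on the Authors file, it is worth checking your entry locally before you push. The following is a rough, illustrative sketch (not Nova's actual test suite) that resolves an address through `.mailmap` and looks it up in `Authors`::

    # Illustrative only: check that an email address is covered by
    # the Authors file, honouring .mailmap alias lines of the form
    # '<canonical@example.com> <alias@example.com>'.
    import re

    def load_mailmap(path='.mailmap'):
        mapping = {}
        for line in open(path):
            emails = re.findall(r'<([^>]+)>', line)
            if len(emails) == 2:
                canonical, alias = emails
                mapping[alias] = canonical
        return mapping

    def is_listed(email):
        email = load_mailmap().get(email, email)
        return email in open('Authors').read()

    # 'you@example.com' is a placeholder for your bzr commit address.
    print is_listed('you@example.com')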
Now, push the branch to Launchpad:: bzr push lp:~launchpaduserid/nova/cleaning diff --git a/doc/source/devref/fakes.rst b/doc/source/devref/fakes.rst index 0ba5d6ef2bec..6073447f0915 100644 --- a/doc/source/devref/fakes.rst +++ b/doc/source/devref/fakes.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/glance.rst b/doc/source/devref/glance.rst index d18f7fec6787..9a1c14d58b02 100644 --- a/doc/source/devref/glance.rst +++ b/doc/source/devref/glance.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/index.rst b/doc/source/devref/index.rst index 589609aceb5f..9613ba9900d7 100644 --- a/doc/source/devref/index.rst +++ b/doc/source/devref/index.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/network.rst b/doc/source/devref/network.rst index d9d0914946f7..eaf13e9bacb0 100644 --- a/doc/source/devref/network.rst +++ b/doc/source/devref/network.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/nova.rst b/doc/source/devref/nova.rst index 53ce6f34fc3c..093fbb3eee77 100644 --- a/doc/source/devref/nova.rst +++ b/doc/source/devref/nova.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/objectstore.rst b/doc/source/devref/objectstore.rst index 3ccfc8566fc6..f140e85e99ff 100644 --- a/doc/source/devref/objectstore.rst +++ b/doc/source/devref/objectstore.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/rabbit.rst b/doc/source/devref/rabbit.rst index ae0bac49dfad..c17a1322264a 100644 --- a/doc/source/devref/rabbit.rst +++ b/doc/source/devref/rabbit.rst @@ -1,5 +1,6 @@ .. Copyright (c) 2010 Citrix Systems, Inc. + All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain @@ -29,7 +30,7 @@ Nova (Austin release) uses both direct and topic-based exchanges. The architectu .. -Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which take cares of marshalling and unmarshalling of messages into function calls. Each Nova service (for example Compute, Volume, etc.) 
create two queues at the initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another, which accepts messages with routing keys as generic 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response, otherwise is acts as publisher only. +Nova implements RPC (both request+response, and one-way, respectively nicknamed 'rpc.call' and 'rpc.cast') over AMQP by providing an adapter class which takes care of marshaling and unmarshaling of messages into function calls. Each Nova service (for example Compute, Volume, etc.) creates two queues at initialization time, one which accepts messages with routing keys 'NODE-TYPE.NODE-ID' (for example compute.hostname) and another which accepts messages with the generic routing key 'NODE-TYPE' (for example compute). The former is used specifically when Nova-API needs to redirect commands to a specific node like 'euca-terminate instance'. In this case, only the compute node whose host's hypervisor is running the virtual machine can kill the instance. The API acts as a consumer when RPC calls are request/response; otherwise it acts as a publisher only. Nova RPC Mappings ----------------- @@ -39,7 +40,7 @@ The figure below shows the internals of a RabbitMQ node when a single instance i Figure 2 shows the following internal elements: * Topic Publisher: a Topic Publisher comes to life when an rpc.call or an rpc.cast operation is executed; this object is instantiated and used to push a message to the queuing system. Every publisher connects always to the same topic-based exchange; its life-cycle is limited to the message delivery. - * Direct Consumer: a Direct Consumer comes to life if (an only if) a rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; Every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshalled in the message sent by the Topic Publisher (only rpc.call operations). + * Direct Consumer: a Direct Consumer comes to life if (and only if) an rpc.call operation is executed; this object is instantiated and used to receive a response message from the queuing system; every consumer connects to a unique direct-based exchange via a unique exclusive queue; its life-cycle is limited to the message delivery; the exchange and queue identifiers are determined by a UUID generator, and are marshaled in the message sent by the Topic Publisher (only rpc.call operations). * Topic Consumer: a Topic Consumer comes to life as soon as a Worker is instantiated and exists throughout its life-cycle; this object is used to receive messages from the queue and it invokes the appropriate action as defined by the Worker role. A Topic Consumer connects to the same topic-based exchange either via a shared queue or via a unique exclusive queue.
Every Worker has two topic consumers, one that is addressed only during rpc.cast operations (and it connects to a shared queue whose exchange key is 'topic') and the other that is addressed only during rpc.call operations (and it connects to a unique queue whose exchange key is 'topic.host'). * Direct Publisher: a Direct Publisher comes to life only during rpc.call operations and it is instantiated to return the message required by the request/response operation. The object connects to a direct-based exchange whose identity is dictated by the incoming message. * Topic Exchange: The Exchange is a routing table that exists in the context of a virtual host (the multi-tenancy mechanism provided by RabbitMQ); its type (such as topic vs. direct) determines the routing policy; a RabbitMQ node will have only one topic-based exchange for every topic in Nova. diff --git a/doc/source/devref/scheduler.rst b/doc/source/devref/scheduler.rst index ab74b6ba837e..06678151437a 100644 --- a/doc/source/devref/scheduler.rst +++ b/doc/source/devref/scheduler.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/services.rst b/doc/source/devref/services.rst index f5bba5c126d5..ae237a248d71 100644 --- a/doc/source/devref/services.rst +++ b/doc/source/devref/services.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/devref/volume.rst b/doc/source/devref/volume.rst index 54a2d4f8bccb..c4dddb9ea883 100644 --- a/doc/source/devref/volume.rst +++ b/doc/source/devref/volume.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/index.rst b/doc/source/index.rst index b9ba6208a448..6eec09acbe03 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/livecd.rst b/doc/source/livecd.rst index b355fa180163..37c92c8bca6a 100644 --- a/doc/source/livecd.rst +++ b/doc/source/livecd.rst @@ -1,3 +1,20 @@ +.. + Copyright 2010-2011 OpenStack LLC + + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. 
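Before moving on to the Live CD notes: the rpc.call / rpc.cast routing described in devref/rabbit.rst above can be made concrete with a toy sketch. The names below are invented stand-ins for real AMQP exchanges and for Nova's actual rpc module::

    # Toy model of the routing described in rabbit.rst -- invented
    # names, not Nova's rpc module. A cast goes to the shared 'topic'
    # queue; a call goes to 'topic.host', and the response comes back
    # on a direct queue named by a fresh msg_id that is marshaled
    # into the outgoing message.
    import uuid

    queues = {}  # routing key -> pending messages

    def publish(key, message):
        queues.setdefault(key, []).append(message)

    def consume_one(key):
        # Stand-in for a worker's topic consumer: handle one message
        # and publish a reply if the sender expects a response.
        msg = queues[key].pop(0)
        result = 'did %s(%r)' % (msg['method'], msg['args'])
        if 'msg_id' in msg:
            publish(msg['msg_id'], result)

    def cast(topic, method, args):
        # One-way: any worker consuming the shared topic queue wins.
        publish(topic, {'method': method, 'args': args})

    def call(topic, host, method, args):
        # Request/response: address one specific node.
        msg_id = str(uuid.uuid4())
        publish('%s.%s' % (topic, host),
                {'method': method, 'args': args, 'msg_id': msg_id})
        consume_one('%s.%s' % (topic, host))  # worker handles it
        return queues[msg_id].pop(0)          # direct consumer reads reply

    cast('compute', 'reboot_instance', {'instance_id': 1})
    consume_one('compute')
    print call('compute', 'host1', 'get_console_output', {'instance_id': 1})

In real Nova the queues live in RabbitMQ and the consumers run inside each service's event loop.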
+ Installing the Live CD ====================== diff --git a/doc/source/nova.concepts.rst b/doc/source/nova.concepts.rst index fb3969a433e6..e9687dc98427 100644 --- a/doc/source/nova.concepts.rst +++ b/doc/source/nova.concepts.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. @@ -105,7 +105,7 @@ It is important to know that there are user-specific (sometimes called global) r For example: A user can access api commands allowed to the netadmin role (like allocate_address) only if he has the user-specific netadmin role AND the project-specific netadmin role. -More information about RBAC can be found in the :ref:`auth`. +More information about RBAC can be found in :ref:`auth`. Concept: API ------------ @@ -159,12 +159,10 @@ vpn management, and much more. See :doc:`nova.manage` in the Administration Guide for more details. - Concept: Flags -------------- -Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within flag files. When you install Nova packages, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. - +Nova uses python-gflags for a distributed command line system, and the flags can either be set when running a command at the command line or within a flag file. When you install Nova packages for the Austin release, each Nova service gets its own flag file. For example, nova-network.conf is used for configuring the nova-network service, and so forth. In releases beyond Austin (which was released in October 2010), all flags are set in nova.conf. Concept: Plugins ---------------- diff --git a/doc/source/object.model.rst b/doc/source/object.model.rst index c8d4df7367fe..d02f151fdc7a 100644 --- a/doc/source/object.model.rst +++ b/doc/source/object.model.rst @@ -1,3 +1,20 @@ +.. + Copyright 2010-2011 OpenStack LLC + + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + Object Model ============ @@ -25,29 +42,38 @@ Object Model Users ----- +Each Nova User is authorized based on their access key and secret key, assigned per-user. Read more at :doc:`/adminguide/managing.users`. + Projects -------- +For Nova, access to images is based on the project. Read more at :doc:`/adminguide/managing.projects`. Images ------ +Images are binary files that contain the operating system an instance runs. Read more at :doc:`/adminguide/managing.images`. Instances --------- +Instances are running virtual servers. Read more at :doc:`/adminguide/managing.instances`. Volumes ------- +.. todo:: Write doc about volumes Security Groups --------------- +In Nova, a security group is a named collection of network access rules, like firewall policies. Read more at `Security Groups `_. VLANs ----- +VLAN is the default network mode for Nova.
Read more at :doc:`/adminguide/network.vlan`. IP Addresses ------------ +Nova enables floating IP management. \ No newline at end of file diff --git a/doc/source/quickstart.rst b/doc/source/quickstart.rst index fa5d96738ca2..17c9e10a884c 100644 --- a/doc/source/quickstart.rst +++ b/doc/source/quickstart.rst @@ -1,5 +1,5 @@ .. - Copyright 2010 United States Government as represented by the + Copyright 2010-2011 United States Government as represented by the Administrator of the National Aeronautics and Space Administration. All Rights Reserved. diff --git a/doc/source/service.architecture.rst b/doc/source/service.architecture.rst index 28a32bec6b56..8fa1e3306976 100644 --- a/doc/source/service.architecture.rst +++ b/doc/source/service.architecture.rst @@ -1,3 +1,20 @@ +.. + Copyright 2010-2011 OpenStack LLC + + All Rights Reserved. + + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + Service Architecture ==================== diff --git a/etc/nova-api.conf b/etc/nova-api.conf index c5dd0aaec450..4873e465d180 100644 --- a/etc/nova-api.conf +++ b/etc/nova-api.conf @@ -1,9 +1,5 @@ [DEFAULT] verbose = 1 -ec2_port = 8773 -ec2_address = 0.0.0.0 -openstack_port = 8774 -openstack_address = 0.0.0.0 ####### # EC2 # @@ -12,52 +8,80 @@ openstack_address = 0.0.0.0 [composite:ec2] use = egg:Paste#urlmap /: ec2versions -/services: ec2api +/services/Cloud: ec2cloud +/services/Admin: ec2admin /latest: ec2metadata -/200: ec2metadata +/20: ec2metadata /1.0: ec2metadata -[pipeline:ec2api] -pipeline = authenticate router authorizer ec2executor +[pipeline:ec2cloud] +pipeline = logrequest authenticate cloudrequest authorizer ec2executor +#pipeline = logrequest ec2lockout authenticate cloudrequest authorizer ec2executor + +[pipeline:ec2admin] +pipeline = logrequest authenticate adminrequest authorizer ec2executor + +[pipeline:ec2metadata] +pipeline = logrequest ec2md + +[pipeline:ec2versions] +pipeline = logrequest ec2ver + +[filter:logrequest] +paste.filter_factory = nova.api.ec2:RequestLogging.factory + +[filter:ec2lockout] +paste.filter_factory = nova.api.ec2:Lockout.factory [filter:authenticate] -paste.filter_factory = nova.api.ec2:authenticate_factory +paste.filter_factory = nova.api.ec2:Authenticate.factory -[filter:router] -paste.filter_factory = nova.api.ec2:router_factory +[filter:cloudrequest] +controller = nova.api.ec2.cloud.CloudController +paste.filter_factory = nova.api.ec2:Requestify.factory + +[filter:adminrequest] +controller = nova.api.ec2.admin.AdminController +paste.filter_factory = nova.api.ec2:Requestify.factory [filter:authorizer] -paste.filter_factory = nova.api.ec2:authorizer_factory +paste.filter_factory = nova.api.ec2:Authorizer.factory [app:ec2executor] -paste.app_factory = nova.api.ec2:executor_factory +paste.app_factory = nova.api.ec2:Executor.factory -[app:ec2versions] -paste.app_factory = nova.api.ec2:versions_factory +[app:ec2ver] +paste.app_factory = nova.api.ec2:Versions.factory -[app:ec2metadata] -paste.app_factory = nova.api.ec2.metadatarequesthandler:metadata_factory +[app:ec2md] 
+paste.app_factory = nova.api.ec2.metadatarequesthandler:MetadataRequestHandler.factory ############# # Openstack # ############# -[composite:openstack] +[composite:osapi] use = egg:Paste#urlmap /: osversions /v1.0: openstackapi [pipeline:openstackapi] -pipeline = auth ratelimit osapi +pipeline = faultwrap auth ratelimit osapiapp + +[filter:faultwrap] +paste.filter_factory = nova.api.openstack:FaultWrapper.factory [filter:auth] -paste.filter_factory = nova.api.openstack.auth:auth_factory +paste.filter_factory = nova.api.openstack.auth:AuthMiddleware.factory [filter:ratelimit] -paste.filter_factory = nova.api.openstack.ratelimiting:ratelimit_factory +paste.filter_factory = nova.api.openstack.ratelimiting:RateLimitingMiddleware.factory -[app:osapi] -paste.app_factory = nova.api.openstack:router_factory +[app:osapiapp] +paste.app_factory = nova.api.openstack:APIRouter.factory -[app:osversions] -paste.app_factory = nova.api.openstack:versions_factory +[pipeline:osversions] +pipeline = faultwrap osversionapp + +[app:osversionapp] +paste.app_factory = nova.api.openstack:Versions.factory diff --git a/krm_mapping.json.sample b/krm_mapping.json.sample new file mode 100644 index 000000000000..1ecfba635b0e --- /dev/null +++ b/krm_mapping.json.sample @@ -0,0 +1,3 @@ +{ + "machine" : ["kernel", "ramdisk"] +} diff --git a/locale/nova.pot b/locale/nova.pot new file mode 100644 index 000000000000..a96411e33922 --- /dev/null +++ b/locale/nova.pot @@ -0,0 +1,2130 @@ +# Translations template for nova. +# Copyright (C) 2011 ORGANIZATION +# This file is distributed under the same license as the nova project. +# FIRST AUTHOR , 2011. +# +#, fuzzy +msgid "" +msgstr "" +"Project-Id-Version: nova 2011.1\n" +"Report-Msgid-Bugs-To: EMAIL@ADDRESS\n" +"POT-Creation-Date: 2011-01-10 11:25-0800\n" +"PO-Revision-Date: YEAR-MO-DA HO:MI+ZONE\n" +"Last-Translator: FULL NAME \n" +"Language-Team: LANGUAGE \n" +"MIME-Version: 1.0\n" +"Content-Type: text/plain; charset=utf-8\n" +"Content-Transfer-Encoding: 8bit\n" +"Generated-By: Babel 0.9.4\n" + +#: nova/crypto.py:46 +msgid "Filename of root CA" +msgstr "" + +#: nova/crypto.py:49 +msgid "Filename of private key" +msgstr "" + +#: nova/crypto.py:51 +msgid "Filename of root Certificate Revokation List" +msgstr "" + +#: nova/crypto.py:53 +msgid "Where we keep our keys" +msgstr "" + +#: nova/crypto.py:55 +msgid "Where we keep our root CA" +msgstr "" + +#: nova/crypto.py:57 +msgid "Should we use a CA for each project?" +msgstr "" + +#: nova/crypto.py:61 +#, python-format +msgid "Subject for certificate for users, %s for project, user, timestamp" +msgstr "" + +#: nova/crypto.py:66 +#, python-format +msgid "Subject for certificate for projects, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:71 +#, python-format +msgid "Subject for certificate for vpns, %s for project, timestamp" +msgstr "" + +#: nova/crypto.py:258 +#, python-format +msgid "Flags path: %s" +msgstr "" + +#: nova/exception.py:33 +msgid "Unexpected error while running command." 
+msgstr "" + +#: nova/exception.py:36 +#, python-format +msgid "" +"%s\n" +"Command: %s\n" +"Exit code: %s\n" +"Stdout: %r\n" +"Stderr: %r" +msgstr "" + +#: nova/exception.py:86 +msgid "Uncaught exception" +msgstr "" + +#: nova/fakerabbit.py:48 +#, python-format +msgid "(%s) publish (key: %s) %s" +msgstr "" + +#: nova/fakerabbit.py:53 +#, python-format +msgid "Publishing to route %s" +msgstr "" + +#: nova/fakerabbit.py:83 +#, python-format +msgid "Declaring queue %s" +msgstr "" + +#: nova/fakerabbit.py:89 +#, python-format +msgid "Declaring exchange %s" +msgstr "" + +#: nova/fakerabbit.py:95 +#, python-format +msgid "Binding %s to %s with key %s" +msgstr "" + +#: nova/fakerabbit.py:120 +#, python-format +msgid "Getting from %s: %s" +msgstr "" + +#: nova/rpc.py:92 +#, python-format +msgid "AMQP server on %s:%d is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/rpc.py:99 +#, python-format +msgid "Unable to connect to AMQP server after %d tries. Shutting down." +msgstr "" + +#: nova/rpc.py:118 +msgid "Reconnected to queue" +msgstr "" + +#: nova/rpc.py:125 +msgid "Failed to fetch message from queue" +msgstr "" + +#: nova/rpc.py:155 +#, python-format +msgid "Initing the Adapter Consumer for %s" +msgstr "" + +#: nova/rpc.py:170 +#, python-format +msgid "received %s" +msgstr "" + +#: nova/rpc.py:183 +#, python-format +msgid "no method for message: %s" +msgstr "" + +#: nova/rpc.py:184 +#, python-format +msgid "No method for message: %s" +msgstr "" + +#: nova/rpc.py:245 +#, python-format +msgid "Returning exception %s to caller" +msgstr "" + +#: nova/rpc.py:286 +#, python-format +msgid "unpacked context: %s" +msgstr "" + +#: nova/rpc.py:305 +msgid "Making asynchronous call..." +msgstr "" + +#: nova/rpc.py:308 +#, python-format +msgid "MSG_ID is %s" +msgstr "" + +#: nova/rpc.py:356 +#, python-format +msgid "response %s" +msgstr "" + +#: nova/rpc.py:365 +#, python-format +msgid "topic is %s" +msgstr "" + +#: nova/rpc.py:366 +#, python-format +msgid "message %s" +msgstr "" + +#: nova/service.py:157 +#, python-format +msgid "Starting %s node" +msgstr "" + +#: nova/service.py:169 +msgid "Service killed that has no database entry" +msgstr "" + +#: nova/service.py:190 +msgid "The service database object disappeared, Recreating it." +msgstr "" + +#: nova/service.py:202 +msgid "Recovered model server connection!" +msgstr "" + +#: nova/service.py:208 +msgid "model server went away" +msgstr "" + +#: nova/service.py:217 nova/db/sqlalchemy/__init__.py:43 +#, python-format +msgid "Data store %s is unreachable. Trying again in %d seconds." +msgstr "" + +#: nova/service.py:232 nova/twistd.py:232 +#, python-format +msgid "Serving %s" +msgstr "" + +#: nova/service.py:234 nova/twistd.py:264 +msgid "Full set of FLAGS:" +msgstr "" + +#: nova/twistd.py:211 +#, python-format +msgid "pidfile %s does not exist. 
Daemon not running?\n" +msgstr "" + +#: nova/twistd.py:268 +#, python-format +msgid "Starting %s" +msgstr "" + +#: nova/utils.py:53 +#, python-format +msgid "Inner Exception: %s" +msgstr "" + +#: nova/utils.py:54 +#, python-format +msgid "Class %s cannot be found" +msgstr "" + +#: nova/utils.py:113 +#, python-format +msgid "Fetching %s" +msgstr "" + +#: nova/utils.py:125 +#, python-format +msgid "Running cmd (subprocess): %s" +msgstr "" + +#: nova/utils.py:138 +#, python-format +msgid "Result was %s" +msgstr "" + +#: nova/utils.py:171 +#, python-format +msgid "debug in callback: %s" +msgstr "" + +#: nova/utils.py:176 +#, python-format +msgid "Running %s" +msgstr "" + +#: nova/utils.py:207 +#, python-format +msgid "Couldn't get IP, using 127.0.0.1 %s" +msgstr "" + +#: nova/utils.py:289 +#, python-format +msgid "Invalid backend: %s" +msgstr "" + +#: nova/utils.py:300 +#, python-format +msgid "backend %s" +msgstr "" + +#: nova/api/ec2/__init__.py:133 +msgid "Too many failed authentications." +msgstr "" + +#: nova/api/ec2/__init__.py:142 +#, python-format +msgid "" +"Access key %s has had %d failed authentications and will be locked out " +"for %d minutes." +msgstr "" + +#: nova/api/ec2/__init__.py:179 nova/objectstore/handler.py:140 +#, python-format +msgid "Authentication Failure: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:190 +#, python-format +msgid "Authenticated Request For %s:%s)" +msgstr "" + +#: nova/api/ec2/__init__.py:227 +#, python-format +msgid "action: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:229 +#, python-format +msgid "arg: %s\t\tval: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:301 +#, python-format +msgid "Unauthorized request for controller=%s and action=%s" +msgstr "" + +#: nova/api/ec2/__init__.py:339 +#, python-format +msgid "NotFound raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:342 +#, python-format +msgid "ApiError raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:349 +#, python-format +msgid "Unexpected error raised: %s" +msgstr "" + +#: nova/api/ec2/__init__.py:354 +msgid "An unknown error has occurred. Please try your request again." 
+msgstr "" + +#: nova/api/ec2/admin.py:84 +#, python-format +msgid "Creating new user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:92 +#, python-format +msgid "Deleting user: %s" +msgstr "" + +#: nova/api/ec2/admin.py:114 +#, python-format +msgid "Adding role %s to user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:117 nova/auth/manager.py:415 +#, python-format +msgid "Adding sitewide role %s to user %s" +msgstr "" + +#: nova/api/ec2/admin.py:122 +#, python-format +msgid "Removing role %s from user %s for project %s" +msgstr "" + +#: nova/api/ec2/admin.py:125 nova/auth/manager.py:441 +#, python-format +msgid "Removing sitewide role %s from user %s" +msgstr "" + +#: nova/api/ec2/admin.py:129 nova/api/ec2/admin.py:192 +msgid "operation must be add or remove" +msgstr "" + +#: nova/api/ec2/admin.py:142 +#, python-format +msgid "Getting x509 for user: %s on project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:159 +#, python-format +msgid "Create project %s managed by %s" +msgstr "" + +#: nova/api/ec2/admin.py:170 +#, python-format +msgid "Delete project: %s" +msgstr "" + +#: nova/api/ec2/admin.py:184 nova/auth/manager.py:533 +#, python-format +msgid "Adding user %s to project %s" +msgstr "" + +#: nova/api/ec2/admin.py:188 +#, python-format +msgid "Removing user %s from project %s" +msgstr "" + +#: nova/api/ec2/apirequest.py:95 +#, python-format +msgid "Unsupported API request: controller = %s,action = %s" +msgstr "" + +#: nova/api/ec2/cloud.py:117 +#, python-format +msgid "Generating root CA: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:277 +#, python-format +msgid "Create key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:285 +#, python-format +msgid "Delete key pair %s" +msgstr "" + +#: nova/api/ec2/cloud.py:357 +#, python-format +msgid "%s is not a valid ipProtocol" +msgstr "" + +#: nova/api/ec2/cloud.py:361 +msgid "Invalid port range" +msgstr "" + +#: nova/api/ec2/cloud.py:392 +#, python-format +msgid "Revoke security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:401 nova/api/ec2/cloud.py:414 +msgid "No rule for the specified parameters." 
+msgstr "" + +#: nova/api/ec2/cloud.py:421 +#, python-format +msgid "Authorize security group ingress %s" +msgstr "" + +#: nova/api/ec2/cloud.py:432 +#, python-format +msgid "This rule already exists in group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:460 +#, python-format +msgid "Create Security Group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:463 +#, python-format +msgid "group %s already exists" +msgstr "" + +#: nova/api/ec2/cloud.py:475 +#, python-format +msgid "Delete security group %s" +msgstr "" + +#: nova/api/ec2/cloud.py:483 nova/compute/manager.py:452 +#, python-format +msgid "Get console output for instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:543 +#, python-format +msgid "Create volume of %s GB" +msgstr "" + +#: nova/api/ec2/cloud.py:567 +#, python-format +msgid "Attach volume %s to instacne %s at %s" +msgstr "" + +#: nova/api/ec2/cloud.py:579 +#, python-format +msgid "Detach volume %s" +msgstr "" + +#: nova/api/ec2/cloud.py:686 +msgid "Allocate address" +msgstr "" + +#: nova/api/ec2/cloud.py:691 +#, python-format +msgid "Release address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:696 +#, python-format +msgid "Associate address %s to instance %s" +msgstr "" + +#: nova/api/ec2/cloud.py:703 +#, python-format +msgid "Disassociate address %s" +msgstr "" + +#: nova/api/ec2/cloud.py:730 +msgid "Going to start terminating instances" +msgstr "" + +#: nova/api/ec2/cloud.py:738 +#, python-format +msgid "Reboot instance %r" +msgstr "" + +#: nova/api/ec2/cloud.py:775 +#, python-format +msgid "De-registering image %s" +msgstr "" + +#: nova/api/ec2/cloud.py:783 +#, python-format +msgid "Registered image %s with id %s" +msgstr "" + +#: nova/api/ec2/cloud.py:789 nova/api/ec2/cloud.py:804 +#, python-format +msgid "attribute not supported: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:794 +#, python-format +msgid "invalid id: %s" +msgstr "" + +#: nova/api/ec2/cloud.py:807 +msgid "user or group not specified" +msgstr "" + +#: nova/api/ec2/cloud.py:809 +msgid "only group \"all\" is supported" +msgstr "" + +#: nova/api/ec2/cloud.py:811 +msgid "operation_type must be add or remove" +msgstr "" + +#: nova/api/ec2/cloud.py:812 +#, python-format +msgid "Updating image %s publicity" +msgstr "" + +#: nova/api/ec2/metadatarequesthandler.py:75 +#, python-format +msgid "Failed to get metadata for ip: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:70 +#, python-format +msgid "Caught error: %s" +msgstr "" + +#: nova/api/openstack/__init__.py:86 +msgid "Including admin operations in API." 
+msgstr "" + +#: nova/api/openstack/servers.py:184 +#, python-format +msgid "Compute.api::lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:199 +#, python-format +msgid "Compute.api::unlock %s" +msgstr "" + +#: nova/api/openstack/servers.py:213 +#, python-format +msgid "Compute.api::get_lock %s" +msgstr "" + +#: nova/api/openstack/servers.py:224 +#, python-format +msgid "Compute.api::pause %s" +msgstr "" + +#: nova/api/openstack/servers.py:235 +#, python-format +msgid "Compute.api::unpause %s" +msgstr "" + +#: nova/api/openstack/servers.py:246 +#, python-format +msgid "compute.api::suspend %s" +msgstr "" + +#: nova/api/openstack/servers.py:257 +#, python-format +msgid "compute.api::resume %s" +msgstr "" + +#: nova/auth/dbdriver.py:84 +#, python-format +msgid "User %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:106 nova/auth/ldapdriver.py:207 +#, python-format +msgid "Project can't be created because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:135 nova/auth/ldapdriver.py:204 +#, python-format +msgid "Project can't be created because project %s already exists" +msgstr "" + +#: nova/auth/dbdriver.py:157 nova/auth/ldapdriver.py:241 +#, python-format +msgid "Project can't be modified because manager %s doesn't exist" +msgstr "" + +#: nova/auth/dbdriver.py:245 +#, python-format +msgid "User \"%s\" not found" +msgstr "" + +#: nova/auth/dbdriver.py:248 +#, python-format +msgid "Project \"%s\" not found" +msgstr "" + +#: nova/auth/fakeldap.py:33 +msgid "Attempted to instantiate singleton" +msgstr "" + +#: nova/auth/ldapdriver.py:181 +#, python-format +msgid "LDAP object for %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:218 +#, python-format +msgid "Project can't be created because user %s doesn't exist" +msgstr "" + +#: nova/auth/ldapdriver.py:478 +#, python-format +msgid "User %s is already a member of the group %s" +msgstr "" + +#: nova/auth/ldapdriver.py:507 +#, python-format +msgid "" +"Attempted to remove the last member of a group. Deleting the group at %s " +"instead." 
+msgstr "" + +#: nova/auth/ldapdriver.py:528 +#, python-format +msgid "Group at dn %s doesn't exist" +msgstr "" + +#: nova/auth/manager.py:259 +#, python-format +msgid "Looking up user: %r" +msgstr "" + +#: nova/auth/manager.py:263 +#, python-format +msgid "Failed authorization for access key %s" +msgstr "" + +#: nova/auth/manager.py:264 +#, python-format +msgid "No user found for access key %s" +msgstr "" + +#: nova/auth/manager.py:270 +#, python-format +msgid "Using project name = user name (%s)" +msgstr "" + +#: nova/auth/manager.py:275 +#, python-format +msgid "failed authorization: no project named %s (user=%s)" +msgstr "" + +#: nova/auth/manager.py:277 +#, python-format +msgid "No project called %s could be found" +msgstr "" + +#: nova/auth/manager.py:281 +#, python-format +msgid "Failed authorization: user %s not admin and not member of project %s" +msgstr "" + +#: nova/auth/manager.py:283 +#, python-format +msgid "User %s is not a member of project %s" +msgstr "" + +#: nova/auth/manager.py:292 nova/auth/manager.py:303 +#, python-format +msgid "Invalid signature for user %s" +msgstr "" + +#: nova/auth/manager.py:293 nova/auth/manager.py:304 +msgid "Signature does not match" +msgstr "" + +#: nova/auth/manager.py:374 +msgid "Must specify project" +msgstr "" + +#: nova/auth/manager.py:408 +#, python-format +msgid "The %s role can not be found" +msgstr "" + +#: nova/auth/manager.py:410 +#, python-format +msgid "The %s role is global only" +msgstr "" + +#: nova/auth/manager.py:412 +#, python-format +msgid "Adding role %s to user %s in project %s" +msgstr "" + +#: nova/auth/manager.py:438 +#, python-format +msgid "Removing role %s from user %s on project %s" +msgstr "" + +#: nova/auth/manager.py:505 +#, python-format +msgid "Created project %s with manager %s" +msgstr "" + +#: nova/auth/manager.py:523 +#, python-format +msgid "modifying project %s" +msgstr "" + +#: nova/auth/manager.py:553 +#, python-format +msgid "Remove user %s from project %s" +msgstr "" + +#: nova/auth/manager.py:581 +#, python-format +msgid "Deleting project %s" +msgstr "" + +#: nova/auth/manager.py:637 +#, python-format +msgid "Created user %s (admin: %r)" +msgstr "" + +#: nova/auth/manager.py:645 +#, python-format +msgid "Deleting user %s" +msgstr "" + +#: nova/auth/manager.py:655 +#, python-format +msgid "Access Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:657 +#, python-format +msgid "Secret Key change for user %s" +msgstr "" + +#: nova/auth/manager.py:659 +#, python-format +msgid "Admin status set to %r for user %s" +msgstr "" + +#: nova/auth/manager.py:708 +#, python-format +msgid "No vpn data for project %s" +msgstr "" + +#: nova/cloudpipe/pipelib.py:45 +msgid "Template for script to run on cloudpipe instance boot" +msgstr "" + +#: nova/cloudpipe/pipelib.py:48 +msgid "Network to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:51 +msgid "Netmask to push into openvpn config" +msgstr "" + +#: nova/cloudpipe/pipelib.py:97 +#, python-format +msgid "Launching VPN for %s" +msgstr "" + +#: nova/compute/api.py:67 +#, python-format +msgid "Instance %d was not found in get_network_topic" +msgstr "" + +#: nova/compute/api.py:73 +#, python-format +msgid "Instance %d has no host" +msgstr "" + +#: nova/compute/api.py:92 +#, python-format +msgid "Quota exceeeded for %s, tried to run %s instances" +msgstr "" + +#: nova/compute/api.py:94 +#, python-format +msgid "Instance quota exceeded. You can only run %s more instances of this type." 
+msgstr "" + +#: nova/compute/api.py:109 +msgid "Creating a raw instance" +msgstr "" + +#: nova/compute/api.py:156 +#, python-format +msgid "Going to run %s instances..." +msgstr "" + +#: nova/compute/api.py:180 +#, python-format +msgid "Casting to scheduler for %s/%s's instance %s" +msgstr "" + +#: nova/compute/api.py:279 +#, python-format +msgid "Going to try and terminate %s" +msgstr "" + +#: nova/compute/api.py:283 +#, python-format +msgid "Instance %d was not found during terminate" +msgstr "" + +#: nova/compute/api.py:288 +#, python-format +msgid "Instance %d is already being terminated" +msgstr "" + +#: nova/compute/api.py:450 +#, python-format +msgid "Invalid device specified: %s. Example device: /dev/vdb" +msgstr "" + +#: nova/compute/api.py:465 +msgid "Volume isn't attached to anything!" +msgstr "" + +#: nova/compute/disk.py:71 +#, python-format +msgid "Input partition size not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:75 +#, python-format +msgid "Bytes for local storage not evenly divisible by sector size: %d / %d" +msgstr "" + +#: nova/compute/disk.py:128 +#, python-format +msgid "Could not attach image to loopback: %s" +msgstr "" + +#: nova/compute/disk.py:136 +#, python-format +msgid "Failed to load partition: %s" +msgstr "" + +#: nova/compute/disk.py:158 +#, python-format +msgid "Failed to mount filesystem: %s" +msgstr "" + +#: nova/compute/instance_types.py:41 +#, python-format +msgid "Unknown instance type: %s" +msgstr "" + +#: nova/compute/manager.py:69 +#, python-format +msgid "check_instance_lock: decorating: |%s|" +msgstr "" + +#: nova/compute/manager.py:71 +#, python-format +msgid "check_instance_lock: arguments: |%s| |%s| |%s|" +msgstr "" + +#: nova/compute/manager.py:75 +#, python-format +msgid "check_instance_lock: locked: |%s|" +msgstr "" + +#: nova/compute/manager.py:77 +#, python-format +msgid "check_instance_lock: admin: |%s|" +msgstr "" + +#: nova/compute/manager.py:82 +#, python-format +msgid "check_instance_lock: executing: |%s|" +msgstr "" + +#: nova/compute/manager.py:86 +#, python-format +msgid "check_instance_lock: not executing |%s|" +msgstr "" + +#: nova/compute/manager.py:157 +msgid "Instance has already been created" +msgstr "" + +#: nova/compute/manager.py:158 +#, python-format +msgid "instance %s: starting..." 
+msgstr "" + +#: nova/compute/manager.py:197 +#, python-format +msgid "instance %s: Failed to spawn" +msgstr "" + +#: nova/compute/manager.py:211 nova/tests/test_cloud.py:228 +#, python-format +msgid "Terminating instance %s" +msgstr "" + +#: nova/compute/manager.py:217 +#, python-format +msgid "Disassociating address %s" +msgstr "" + +#: nova/compute/manager.py:230 +#, python-format +msgid "Deallocating address %s" +msgstr "" + +#: nova/compute/manager.py:243 +#, python-format +msgid "trying to destroy already destroyed instance: %s" +msgstr "" + +#: nova/compute/manager.py:257 +#, python-format +msgid "Rebooting instance %s" +msgstr "" + +#: nova/compute/manager.py:260 +#, python-format +msgid "trying to reboot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:286 +#, python-format +msgid "instance %s: snapshotting" +msgstr "" + +#: nova/compute/manager.py:289 +#, python-format +msgid "trying to snapshot a non-running instance: %s (state: %s excepted: %s)" +msgstr "" + +#: nova/compute/manager.py:301 +#, python-format +msgid "instance %s: rescuing" +msgstr "" + +#: nova/compute/manager.py:316 +#, python-format +msgid "instance %s: unrescuing" +msgstr "" + +#: nova/compute/manager.py:335 +#, python-format +msgid "instance %s: pausing" +msgstr "" + +#: nova/compute/manager.py:352 +#, python-format +msgid "instance %s: unpausing" +msgstr "" + +#: nova/compute/manager.py:369 +#, python-format +msgid "instance %s: retrieving diagnostics" +msgstr "" + +#: nova/compute/manager.py:382 +#, python-format +msgid "instance %s: suspending" +msgstr "" + +#: nova/compute/manager.py:401 +#, python-format +msgid "instance %s: resuming" +msgstr "" + +#: nova/compute/manager.py:420 +#, python-format +msgid "instance %s: locking" +msgstr "" + +#: nova/compute/manager.py:432 +#, python-format +msgid "instance %s: unlocking" +msgstr "" + +#: nova/compute/manager.py:442 +#, python-format +msgid "instance %s: getting locked state" +msgstr "" + +#: nova/compute/manager.py:462 +#, python-format +msgid "instance %s: attaching volume %s to %s" +msgstr "" + +#: nova/compute/manager.py:478 +#, python-format +msgid "instance %s: attach failed %s, removing" +msgstr "" + +#: nova/compute/manager.py:493 +#, python-format +msgid "Detach volume %s from mountpoint %s on instance %s" +msgstr "" + +#: nova/compute/manager.py:497 +#, python-format +msgid "Detaching volume from unknown instance %s" +msgstr "" + +#: nova/compute/monitor.py:259 +#, python-format +msgid "updating %s..." 
+msgstr "" + +#: nova/compute/monitor.py:289 +msgid "unexpected error during update" +msgstr "" + +#: nova/compute/monitor.py:355 +#, python-format +msgid "Cannot get blockstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:377 +#, python-format +msgid "Cannot get ifstats for \"%s\" on \"%s\"" +msgstr "" + +#: nova/compute/monitor.py:412 +msgid "unexpected exception getting connection" +msgstr "" + +#: nova/compute/monitor.py:427 +#, python-format +msgid "Found instance: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:43 +msgid "Use of empty request context is deprecated" +msgstr "" + +#: nova/db/sqlalchemy/api.py:132 +#, python-format +msgid "No service for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:229 +#, python-format +msgid "No service for %s, %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:574 +#, python-format +msgid "No floating ip for address %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:668 +#, python-format +msgid "No instance for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:758 nova/virt/libvirt_conn.py:598 +#: nova/virt/xenapi/volumeops.py:48 nova/virt/xenapi/volumeops.py:103 +#, python-format +msgid "Instance %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:891 +#, python-format +msgid "no keypair for user %s, name %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1006 nova/db/sqlalchemy/api.py:1064 +#, python-format +msgid "No network for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1036 +#, python-format +msgid "No network for bridge %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1050 +#, python-format +msgid "No network for instance %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1180 +#, python-format +msgid "Token %s does not exist" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1205 +#, python-format +msgid "No quota for project_id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1356 +#, python-format +msgid "No volume for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1401 +#, python-format +msgid "Volume %s not found" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1413 +#, python-format +msgid "No export device found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1426 +#, python-format +msgid "No target id found for volume %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1471 +#, python-format +msgid "No security group with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1488 +#, python-format +msgid "No security group named %s for project: %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1576 +#, python-format +msgid "No secuity group rule with id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1650 +#, python-format +msgid "No user for id %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1666 +#, python-format +msgid "No user for access key %s" +msgstr "" + +#: nova/db/sqlalchemy/api.py:1728 +#, python-format +msgid "No project with id %s" +msgstr "" + +#: nova/image/glance.py:78 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images" +msgstr "" + +#: nova/image/glance.py:97 +#, python-format +msgid "Parallax returned HTTP error %d from request for /images/detail" +msgstr "" + +#: nova/image/s3.py:82 +#, python-format +msgid "Image %s could not be found" +msgstr "" + +#: nova/network/api.py:39 +#, python-format +msgid "Quota exceeeded for %s, tried to allocate address" +msgstr "" + +#: nova/network/api.py:42 +msgid "Address quota exceeded. 
You cannot allocate any more addresses" +msgstr "" + +#: nova/network/linux_net.py:176 +#, python-format +msgid "Starting VLAN inteface %s" +msgstr "" + +#: nova/network/linux_net.py:186 +#, python-format +msgid "Starting Bridge interface for %s" +msgstr "" + +#: nova/network/linux_net.py:254 +#, python-format +msgid "Hupping dnsmasq threw %s" +msgstr "" + +#: nova/network/linux_net.py:256 +#, python-format +msgid "Pid %d is stale, relaunching dnsmasq" +msgstr "" + +#: nova/network/linux_net.py:334 +#, python-format +msgid "Killing dnsmasq threw %s" +msgstr "" + +#: nova/network/manager.py:135 +msgid "setting network host" +msgstr "" + +#: nova/network/manager.py:190 +#, python-format +msgid "Leasing IP %s" +msgstr "" + +#: nova/network/manager.py:194 +#, python-format +msgid "IP %s leased that isn't associated" +msgstr "" + +#: nova/network/manager.py:197 +#, python-format +msgid "IP %s leased to bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:205 +#, python-format +msgid "IP %s leased that was already deallocated" +msgstr "" + +#: nova/network/manager.py:214 +#, python-format +msgid "IP %s released that isn't associated" +msgstr "" + +#: nova/network/manager.py:217 +#, python-format +msgid "IP %s released from bad mac %s vs %s" +msgstr "" + +#: nova/network/manager.py:220 +#, python-format +msgid "IP %s released that was not leased" +msgstr "" + +#: nova/network/manager.py:442 +#, python-format +msgid "Dissassociated %s stale fixed ip(s)" +msgstr "" + +#: nova/objectstore/handler.py:106 +#, python-format +msgid "Unknown S3 value type %r" +msgstr "" + +#: nova/objectstore/handler.py:137 +msgid "Authenticated request" +msgstr "" + +#: nova/objectstore/handler.py:182 +msgid "List of buckets requested" +msgstr "" + +#: nova/objectstore/handler.py:209 +#, python-format +msgid "List keys for bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:217 +#, python-format +msgid "Unauthorized attempt to access bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:235 +#, python-format +msgid "Creating bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:245 +#, python-format +msgid "Deleting bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:249 +#, python-format +msgid "Unauthorized attempt to delete bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:271 +#, python-format +msgid "Getting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:274 +#, python-format +msgid "Unauthorized attempt to get object %s from bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:292 +#, python-format +msgid "Putting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:295 +#, python-format +msgid "Unauthorized attempt to upload object %s to bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:314 +#, python-format +msgid "Deleting object: %s / %s" +msgstr "" + +#: nova/objectstore/handler.py:393 +#, python-format +msgid "Not authorized to upload image: invalid directory %s" +msgstr "" + +#: nova/objectstore/handler.py:401 +#, python-format +msgid "Not authorized to upload image: unauthorized bucket %s" +msgstr "" + +#: nova/objectstore/handler.py:406 +#, python-format +msgid "Starting image upload: %s" +msgstr "" + +#: nova/objectstore/handler.py:420 +#, python-format +msgid "Not authorized to update attributes of image %s" +msgstr "" + +#: nova/objectstore/handler.py:428 +#, python-format +msgid "Toggling publicity flag of image %s %r" +msgstr "" + +#: nova/objectstore/handler.py:433 +#, python-format +msgid "Updating user fields on image %s" +msgstr "" + +#: 
nova/objectstore/handler.py:447 +#, python-format +msgid "Unauthorized attempt to delete image %s" +msgstr "" + +#: nova/objectstore/handler.py:452 +#, python-format +msgid "Deleted image: %s" +msgstr "" + +#: nova/scheduler/chance.py:37 nova/scheduler/simple.py:73 +#: nova/scheduler/simple.py:106 nova/scheduler/simple.py:118 +msgid "No hosts found" +msgstr "" + +#: nova/scheduler/driver.py:66 +msgid "Must implement a fallback schedule" +msgstr "" + +#: nova/scheduler/manager.py:69 +#, python-format +msgid "Casting to %s %s for %s" +msgstr "" + +#: nova/scheduler/simple.py:63 +msgid "All hosts have too many cores" +msgstr "" + +#: nova/scheduler/simple.py:95 +msgid "All hosts have too many gigabytes" +msgstr "" + +#: nova/scheduler/simple.py:115 +msgid "All hosts have too many networks" +msgstr "" + +#: nova/tests/test_cloud.py:198 +msgid "Can't test instances without a real virtual env." +msgstr "" + +#: nova/tests/test_cloud.py:210 +#, python-format +msgid "Need to watch instance %s until it's running..." +msgstr "" + +#: nova/tests/test_compute.py:104 +#, python-format +msgid "Running instances: %s" +msgstr "" + +#: nova/tests/test_compute.py:110 +#, python-format +msgid "After terminating instances: %s" +msgstr "" + +#: nova/tests/test_rpc.py:89 +#, python-format +msgid "Nested received %s, %s" +msgstr "" + +#: nova/tests/test_rpc.py:94 +#, python-format +msgid "Nested return %s" +msgstr "" + +#: nova/tests/test_rpc.py:119 nova/tests/test_rpc.py:125 +#, python-format +msgid "Received %s" +msgstr "" + +#: nova/tests/test_volume.py:162 +#, python-format +msgid "Target %s allocated" +msgstr "" + +#: nova/virt/connection.py:73 +msgid "Failed to open connection to the hypervisor" +msgstr "" + +#: nova/virt/fake.py:210 +#, python-format +msgid "Instance %s Not Found" +msgstr "" + +#: nova/virt/hyperv.py:118 +msgid "In init host" +msgstr "" + +#: nova/virt/hyperv.py:131 +#, python-format +msgid "Attempt to create duplicate vm %s" +msgstr "" + +#: nova/virt/hyperv.py:148 +#, python-format +msgid "Starting VM %s " +msgstr "" + +#: nova/virt/hyperv.py:150 +#, python-format +msgid "Started VM %s " +msgstr "" + +#: nova/virt/hyperv.py:152 +#, python-format +msgid "spawn vm failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:169 +#, python-format +msgid "Failed to create VM %s" +msgstr "" + +#: nova/virt/hyperv.py:171 nova/virt/xenapi/vm_utils.py:125 +#, python-format +msgid "Created VM %s..." +msgstr "" + +#: nova/virt/hyperv.py:188 +#, python-format +msgid "Set memory for vm %s..." +msgstr "" + +#: nova/virt/hyperv.py:198 +#, python-format +msgid "Set vcpus for vm %s..." 
+msgstr "" + +#: nova/virt/hyperv.py:202 +#, python-format +msgid "Creating disk for %s by attaching disk file %s" +msgstr "" + +#: nova/virt/hyperv.py:227 +#, python-format +msgid "Failed to add diskdrive to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:230 +#, python-format +msgid "New disk drive path is %s" +msgstr "" + +#: nova/virt/hyperv.py:247 +#, python-format +msgid "Failed to add vhd file to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:249 +#, python-format +msgid "Created disk for %s" +msgstr "" + +#: nova/virt/hyperv.py:253 +#, python-format +msgid "Creating nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:272 +msgid "Failed creating a port on the external vswitch" +msgstr "" + +#: nova/virt/hyperv.py:273 +#, python-format +msgid "Failed creating port for %s" +msgstr "" + +#: nova/virt/hyperv.py:275 +#, python-format +msgid "Created switch port %s on switch %s" +msgstr "" + +#: nova/virt/hyperv.py:285 +#, python-format +msgid "Failed to add nic to VM %s" +msgstr "" + +#: nova/virt/hyperv.py:287 +#, python-format +msgid "Created nic for %s " +msgstr "" + +#: nova/virt/hyperv.py:320 +#, python-format +msgid "WMI job failed: %s" +msgstr "" + +#: nova/virt/hyperv.py:322 +#, python-format +msgid "WMI job succeeded: %s, Elapsed=%s " +msgstr "" + +#: nova/virt/hyperv.py:358 +#, python-format +msgid "Got request to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:383 +#, python-format +msgid "Failed to destroy vm %s" +msgstr "" + +#: nova/virt/hyperv.py:389 +#, python-format +msgid "Del: disk %s vm %s" +msgstr "" + +#: nova/virt/hyperv.py:405 +#, python-format +msgid "" +"Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, " +"cpu_time=%s" +msgstr "" + +#: nova/virt/hyperv.py:424 nova/virt/xenapi/vm_utils.py:301 +#, python-format +msgid "duplicate name found: %s" +msgstr "" + +#: nova/virt/hyperv.py:444 +#, python-format +msgid "Successfully changed vm state of %s to %s" +msgstr "" + +#: nova/virt/hyperv.py:447 nova/virt/hyperv.py:449 +#, python-format +msgid "Failed to change vm state of %s to %s" +msgstr "" + +#: nova/virt/images.py:70 +#, python-format +msgid "Finished retreving %s -- placed in %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:144 +#, python-format +msgid "Connecting to libvirt: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:157 +msgid "Connection to libvirt broke" +msgstr "" + +#: nova/virt/libvirt_conn.py:229 +#, python-format +msgid "instance %s: deleting instance files %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:271 +#, python-format +msgid "No disk at %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:278 +msgid "Instance snapshotting is not supported for libvirtat this time" +msgstr "" + +#: nova/virt/libvirt_conn.py:294 +#, python-format +msgid "instance %s: rebooted" +msgstr "" + +#: nova/virt/libvirt_conn.py:297 +#, python-format +msgid "_wait_for_reboot failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:340 +#, python-format +msgid "instance %s: rescued" +msgstr "" + +#: nova/virt/libvirt_conn.py:343 +#, python-format +msgid "_wait_for_rescue failed: %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:370 +#, python-format +msgid "instance %s: is running" +msgstr "" + +#: nova/virt/libvirt_conn.py:381 +#, python-format +msgid "instance %s: booted" +msgstr "" + +#: nova/virt/libvirt_conn.py:384 nova/virt/xenapi/vmops.py:116 +#, python-format +msgid "instance %s: failed to boot" +msgstr "" + +#: nova/virt/libvirt_conn.py:395 +#, python-format +msgid "virsh said: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:399 +msgid "cool, it's a device" +msgstr "" + +#: 
nova/virt/libvirt_conn.py:407 +#, python-format +msgid "data: %r, fpath: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:415 +#, python-format +msgid "Contents of file %s: %r" +msgstr "" + +#: nova/virt/libvirt_conn.py:449 +#, python-format +msgid "instance %s: Creating image" +msgstr "" + +#: nova/virt/libvirt_conn.py:505 +#, python-format +msgid "instance %s: injecting key into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:508 +#, python-format +msgid "instance %s: injecting net into image %s" +msgstr "" + +#: nova/virt/libvirt_conn.py:516 +#, python-format +msgid "instance %s: ignoring error injecting data into image %s (%s)" +msgstr "" + +#: nova/virt/libvirt_conn.py:544 nova/virt/libvirt_conn.py:547 +#, python-format +msgid "instance %s: starting toXML method" +msgstr "" + +#: nova/virt/libvirt_conn.py:589 +#, python-format +msgid "instance %s: finished toXML method" +msgstr "" + +#: nova/virt/xenapi_conn.py:113 +msgid "" +"Must specify xenapi_connection_url, xenapi_connection_username " +"(optionally), and xenapi_connection_password to use " +"connection_type=xenapi" +msgstr "" + +#: nova/virt/xenapi_conn.py:263 +#, python-format +msgid "Task [%s] %s status: success %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:271 +#, python-format +msgid "Task [%s] %s status: %s %s" +msgstr "" + +#: nova/virt/xenapi_conn.py:287 nova/virt/xenapi_conn.py:300 +#, python-format +msgid "Got exception: %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:72 +#, python-format +msgid "%s: _db_content => %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:247 nova/virt/xenapi/fake.py:338 +#: nova/virt/xenapi/fake.py:356 nova/virt/xenapi/fake.py:404 +msgid "Raising NotImplemented" +msgstr "" + +#: nova/virt/xenapi/fake.py:249 +#, python-format +msgid "xenapi.fake does not have an implementation for %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:283 +#, python-format +msgid "Calling %s %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:288 +#, python-format +msgid "Calling getter %s" +msgstr "" + +#: nova/virt/xenapi/fake.py:340 +#, python-format +msgid "" +"xenapi.fake does not have an implementation for %s or it has been called " +"with the wrong number of arguments" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:40 +#, python-format +msgid "Found non-unique network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/network_utils.py:43 +#, python-format +msgid "Found no network for bridge %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:127 +#, python-format +msgid "Created VM %s as %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:147 +#, python-format +msgid "Creating VBD for VM %s, VDI %s ... " +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:149 +#, python-format +msgid "Created VBD %s for VM %s, VDI %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:165 +#, python-format +msgid "VBD not found in instance %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:175 +#, python-format +msgid "Unable to unplug VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:187 +#, python-format +msgid "Unable to destroy VBD %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:202 +#, python-format +msgid "Creating VIF for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:205 +#, python-format +msgid "Created VIF %s for VM %s, network %s." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:216 +#, python-format +msgid "Snapshotting VM %s with label '%s'..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:229 +#, python-format +msgid "Created snapshot %s from VM %s." 
+msgstr "" + +#: nova/virt/xenapi/vm_utils.py:243 +#, python-format +msgid "Asking xapi to upload %s as '%s'" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:261 +#, python-format +msgid "Asking xapi to fetch %s as %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:279 +#, python-format +msgid "Looking up vdi %s for PV kernel" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:290 +#, python-format +msgid "PV Kernel in VDI:%d" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:318 +#, python-format +msgid "VDI %s is still available" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:331 +#, python-format +msgid "(VM_UTILS) xenserver vm state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:333 +#, python-format +msgid "(VM_UTILS) xenapi power_state -> |%s|" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:390 +#, python-format +msgid "VHD %s has parent %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:407 +#, python-format +msgid "Re-scanning SR %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:431 +#, python-format +msgid "Parent %s doesn't match original parent %s, waiting for coalesce..." +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:448 +#, python-format +msgid "No VDIs found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vm_utils.py:452 +#, python-format +msgid "Unexpected number of VDIs (%s) found for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:62 +#, python-format +msgid "Attempted to create non-unique name %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:99 +#, python-format +msgid "Starting VM %s..." +msgstr "" + +#: nova/virt/xenapi/vmops.py:101 +#, python-format +msgid "Spawning VM %s created %s." +msgstr "" + +#: nova/virt/xenapi/vmops.py:112 +#, python-format +msgid "Instance %s: booted" +msgstr "" + +#: nova/virt/xenapi/vmops.py:137 +#, python-format +msgid "Instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:166 +#, python-format +msgid "Starting snapshot for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:174 +#, python-format +msgid "Unable to Snapshot %s: %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:184 +#, python-format +msgid "Finished snapshot and upload for VM %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:252 +#, python-format +msgid "suspend: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:262 +#, python-format +msgid "resume: instance not present %s" +msgstr "" + +#: nova/virt/xenapi/vmops.py:271 +#, python-format +msgid "Instance not found %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:57 +#, python-format +msgid "Introducing %s..." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:74 +#, python-format +msgid "Introduced %s as %s." +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:78 +msgid "Unable to create Storage Repository" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:90 +#, python-format +msgid "Unable to find SR from VBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:96 +#, python-format +msgid "Forgetting SR %s ... " +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:101 +#, python-format +msgid "Ignoring exception %s when getting PBDs for %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:107 +#, python-format +msgid "Ignoring exception %s when unplugging PBD %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:111 +#, python-format +msgid "Forgetting SR %s done." 
+msgstr "" + +#: nova/virt/xenapi/volume_utils.py:113 +#, python-format +msgid "Ignoring exception %s when forgetting SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:123 +#, python-format +msgid "Unable to introduce VDI on SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:128 +#, python-format +msgid "Unable to get record of VDI %s on" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:146 +#, python-format +msgid "Unable to introduce VDI for SR %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:175 +#, python-format +msgid "Unable to obtain target information %s, %s" +msgstr "" + +#: nova/virt/xenapi/volume_utils.py:197 +#, python-format +msgid "Mountpoint cannot be translated: %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:51 +#, python-format +msgid "Attach_volume: %s, %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:69 +#, python-format +msgid "Unable to create VDI on SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:81 +#, python-format +msgid "Unable to use SR %s for instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:93 +#, python-format +msgid "Unable to attach volume to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:95 +#, python-format +msgid "Mountpoint %s attached to instance %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:106 +#, python-format +msgid "Detach_volume: %s, %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:113 +#, python-format +msgid "Unable to locate volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:121 +#, python-format +msgid "Unable to detach volume %s" +msgstr "" + +#: nova/virt/xenapi/volumeops.py:128 +#, python-format +msgid "Mountpoint %s detached from instance %s" +msgstr "" + +#: nova/volume/api.py:44 +#, python-format +msgid "Quota exceeeded for %s, tried to create %sG volume" +msgstr "" + +#: nova/volume/api.py:46 +#, python-format +msgid "Volume quota exceeded. You cannot create a volume of size %s" +msgstr "" + +#: nova/volume/api.py:70 nova/volume/api.py:95 +msgid "Volume status must be available" +msgstr "" + +#: nova/volume/api.py:97 +msgid "Volume is already attached" +msgstr "" + +#: nova/volume/api.py:103 +msgid "Volume is already detached" +msgstr "" + +#: nova/volume/driver.py:76 +#, python-format +msgid "Recovering from a failed execute. 
Try number %s" +msgstr "" + +#: nova/volume/driver.py:85 +#, python-format +msgid "volume group %s doesn't exist" +msgstr "" + +#: nova/volume/driver.py:210 +#, python-format +msgid "FAKE AOE: %s" +msgstr "" + +#: nova/volume/driver.py:315 +#, python-format +msgid "FAKE ISCSI: %s" +msgstr "" + +#: nova/volume/manager.py:85 +#, python-format +msgid "Re-exporting %s volumes" +msgstr "" + +#: nova/volume/manager.py:93 +#, python-format +msgid "volume %s: creating" +msgstr "" + +#: nova/volume/manager.py:102 +#, python-format +msgid "volume %s: creating lv of size %sG" +msgstr "" + +#: nova/volume/manager.py:106 +#, python-format +msgid "volume %s: creating export" +msgstr "" + +#: nova/volume/manager.py:113 +#, python-format +msgid "volume %s: created successfully" +msgstr "" + +#: nova/volume/manager.py:121 +msgid "Volume is still attached" +msgstr "" + +#: nova/volume/manager.py:123 +msgid "Volume is not local to this node" +msgstr "" + +#: nova/volume/manager.py:124 +#, python-format +msgid "volume %s: removing export" +msgstr "" + +#: nova/volume/manager.py:126 +#, python-format +msgid "volume %s: deleting" +msgstr "" + +#: nova/volume/manager.py:129 +#, python-format +msgid "volume %s: deleted successfully" +msgstr "" + diff --git a/nova/api/__init__.py b/nova/api/__init__.py index 26fed847bb15..0fedbbfadc35 100644 --- a/nova/api/__init__.py +++ b/nova/api/__init__.py @@ -15,97 +15,5 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. -""" -Root WSGI middleware for all API controllers. -**Related Flags** - -:osapi_subdomain: subdomain running the OpenStack API (default: api) -:ec2api_subdomain: subdomain running the EC2 API (default: ec2) - -""" -import logging - -import routes -import webob.dec - -from nova import flags -from nova import wsgi -from nova.api import ec2 -from nova.api import openstack -from nova.api.ec2 import metadatarequesthandler - - -flags.DEFINE_string('osapi_subdomain', 'api', - 'subdomain running the OpenStack API') -flags.DEFINE_string('ec2api_subdomain', 'ec2', - 'subdomain running the EC2 API') - -FLAGS = flags.FLAGS - - -class API(wsgi.Router): - """Routes top-level requests to the appropriate controller.""" - - def __init__(self, default_api): - osapi_subdomain = {'sub_domain': [FLAGS.osapi_subdomain]} - ec2api_subdomain = {'sub_domain': [FLAGS.ec2api_subdomain]} - if default_api == 'os': - osapi_subdomain = {} - elif default_api == 'ec2': - ec2api_subdomain = {} - mapper = routes.Mapper() - mapper.sub_domains = True - - mapper.connect("/", controller=self.osapi_versions, - conditions=osapi_subdomain) - mapper.connect("/v1.0/{path_info:.*}", controller=openstack.API(), - conditions=osapi_subdomain) - - mapper.connect("/", controller=self.ec2api_versions, - conditions=ec2api_subdomain) - mapper.connect("/services/{path_info:.*}", controller=ec2.API(), - conditions=ec2api_subdomain) - mrh = metadatarequesthandler.MetadataRequestHandler() - for s in ['/latest', - '/2009-04-04', - '/2008-09-01', - '/2008-02-01', - '/2007-12-15', - '/2007-10-10', - '/2007-08-29', - '/2007-03-01', - '/2007-01-19', - '/1.0']: - mapper.connect('%s/{path_info:.*}' % s, controller=mrh, - conditions=ec2api_subdomain) - - super(API, self).__init__(mapper) - - @webob.dec.wsgify - def osapi_versions(self, req): - """Respond to a request for all OpenStack API versions.""" - response = { - "versions": [ - dict(status="CURRENT", id="v1.0")]} - metadata = { - 
"application/xml": { - "attributes": dict(version=["status", "id"])}} - return wsgi.Serializer(req.environ, metadata).to_content_type(response) - - @webob.dec.wsgify - def ec2api_versions(self, req): - """Respond to a request for all EC2 versions.""" - # available api versions - versions = [ - '1.0', - '2007-01-19', - '2007-03-01', - '2007-08-29', - '2007-10-10', - '2007-12-15', - '2008-02-01', - '2008-09-01', - '2009-04-04', - ] - return ''.join('%s\n' % v for v in versions) +"""No-op __init__ for directory full of api goodies.""" diff --git a/nova/api/direct.py b/nova/api/direct.py new file mode 100644 index 000000000000..81b3ae202622 --- /dev/null +++ b/nova/api/direct.py @@ -0,0 +1,232 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Public HTTP interface that allows services to self-register. + +The general flow of a request is: + - Request is parsed into WSGI bits. + - Some middleware checks authentication. + - Routing takes place based on the URL to find a controller. + (/controller/method) + - Parameters are parsed from the request and passed to a method on the + controller as keyword arguments. + - Optionally 'json' is decoded to provide all the parameters. + - Actual work is done and a result is returned. + - That result is turned into json and returned. 
+ +""" + +import inspect +import urllib + +import routes +import webob + +from nova import context +from nova import flags +from nova import utils +from nova import wsgi + + +ROUTES = {} + + +def register_service(path, handle): + ROUTES[path] = handle + + +class Router(wsgi.Router): + def __init__(self, mapper=None): + if mapper is None: + mapper = routes.Mapper() + + self._load_registered_routes(mapper) + super(Router, self).__init__(mapper=mapper) + + def _load_registered_routes(self, mapper): + for route in ROUTES: + mapper.connect('/%s/{action}' % route, + controller=ServiceWrapper(ROUTES[route])) + + +class DelegatedAuthMiddleware(wsgi.Middleware): + def process_request(self, request): + os_user = request.headers['X-OpenStack-User'] + os_project = request.headers['X-OpenStack-Project'] + context_ref = context.RequestContext(user=os_user, project=os_project) + request.environ['openstack.context'] = context_ref + + +class JsonParamsMiddleware(wsgi.Middleware): + def process_request(self, request): + if 'json' not in request.params: + return + + params_json = request.params['json'] + params_parsed = utils.loads(params_json) + params = {} + for k, v in params_parsed.iteritems(): + if k in ('self', 'context'): + continue + if k.startswith('_'): + continue + params[k] = v + + request.environ['openstack.params'] = params + + +class PostParamsMiddleware(wsgi.Middleware): + def process_request(self, request): + params_parsed = request.params + params = {} + for k, v in params_parsed.iteritems(): + if k in ('self', 'context'): + continue + if k.startswith('_'): + continue + params[k] = v + + request.environ['openstack.params'] = params + + +class Reflection(object): + """Reflection methods to list available methods.""" + def __init__(self): + self._methods = {} + self._controllers = {} + + def _gather_methods(self): + methods = {} + controllers = {} + for route, handler in ROUTES.iteritems(): + controllers[route] = handler.__doc__.split('\n')[0] + for k in dir(handler): + if k.startswith('_'): + continue + f = getattr(handler, k) + if not callable(f): + continue + + # bunch of ugly formatting stuff + argspec = inspect.getargspec(f) + args = [x for x in argspec[0] + if x != 'self' and x != 'context'] + defaults = argspec[3] and argspec[3] or [] + args_r = list(reversed(args)) + defaults_r = list(reversed(defaults)) + + args_out = [] + while args_r: + if defaults_r: + args_out.append((args_r.pop(0), + repr(defaults_r.pop(0)))) + else: + args_out.append((str(args_r.pop(0)),)) + + # if the method accepts keywords + if argspec[2]: + args_out.insert(0, ('**%s' % argspec[2],)) + + methods['/%s/%s' % (route, k)] = { + 'short_doc': f.__doc__.split('\n')[0], + 'doc': f.__doc__, + 'name': k, + 'args': list(reversed(args_out))} + + self._methods = methods + self._controllers = controllers + + def get_controllers(self, context): + """List available controllers.""" + if not self._controllers: + self._gather_methods() + + return self._controllers + + def get_methods(self, context): + """List available methods.""" + if not self._methods: + self._gather_methods() + + method_list = self._methods.keys() + method_list.sort() + methods = {} + for k in method_list: + methods[k] = self._methods[k]['short_doc'] + return methods + + def get_method_info(self, context, method): + """Get detailed information about a method.""" + if not self._methods: + self._gather_methods() + return self._methods[method] + + +class ServiceWrapper(wsgi.Controller): + def __init__(self, service_handle): + self.service_handle = 
service_handle + + @webob.dec.wsgify + def __call__(self, req): + arg_dict = req.environ['wsgiorg.routing_args'][1] + action = arg_dict['action'] + del arg_dict['action'] + + context = req.environ['openstack.context'] + # allow middleware up the stack to override the params + params = {} + if 'openstack.params' in req.environ: + params = req.environ['openstack.params'] + + # TODO(termie): do some basic normalization on methods + method = getattr(self.service_handle, action) + + result = method(context, **params) + if type(result) is dict or type(result) is list: + return self._serialize(result, req) + else: + return result + + +class Proxy(object): + """Pretend a Direct API endpoint is an object.""" + def __init__(self, app, prefix=None): + self.app = app + self.prefix = prefix + + def __do_request(self, path, context, **kwargs): + req = webob.Request.blank(path) + req.method = 'POST' + req.body = urllib.urlencode({'json': utils.dumps(kwargs)}) + req.environ['openstack.context'] = context + resp = req.get_response(self.app) + try: + return utils.loads(resp.body) + except Exception: + return resp.body + + def __getattr__(self, key): + if self.prefix is None: + return self.__class__(self.app, prefix=key) + + def _wrapper(context, **kwargs): + return self.__do_request('/%s/%s' % (self.prefix, key), + context, + **kwargs) + _wrapper.func_name = key + return _wrapper diff --git a/nova/api/ec2/__init__.py b/nova/api/ec2/__init__.py index aa3bfaeb4520..238cb0f38376 100644 --- a/nova/api/ec2/__init__.py +++ b/nova/api/ec2/__init__.py @@ -20,7 +20,7 @@ Starting point for routing EC2 requests. """ -import logging +import datetime import routes import webob import webob.dec @@ -29,19 +29,18 @@ import webob.exc from nova import context from nova import exception from nova import flags +from nova import log as logging +from nova import utils from nova import wsgi from nova.api.ec2 import apirequest -from nova.api.ec2 import admin -from nova.api.ec2 import cloud from nova.auth import manager FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.api") flags.DEFINE_boolean('use_forwarded_for', False, 'Treat X-Forwarded-For as the canonical remote address. 
' 'Only enable this if you have a sanitizing proxy.') -flags.DEFINE_boolean('use_lockout', False, - 'Whether or not to use lockout middleware.') flags.DEFINE_integer('lockout_attempts', 5, 'Number of failed auths before lockout.') flags.DEFINE_integer('lockout_minutes', 15, @@ -52,17 +51,42 @@ flags.DEFINE_list('lockout_memcached_servers', None, 'Memcached servers or None for in process cache.') -_log = logging.getLogger("api") -_log.setLevel(logging.DEBUG) +class RequestLogging(wsgi.Middleware): + """Access-log style logging for all EC2 API requests.""" + @webob.dec.wsgify + def __call__(self, req): + rv = req.get_response(self.application) + self.log_request_completion(rv, req) + return rv -class API(wsgi.Middleware): - """Routing for all EC2 API requests.""" - - def __init__(self): - self.application = Authenticate(Router(Authorizer(Executor()))) - if FLAGS.use_lockout: - self.application = Lockout(self.application) + def log_request_completion(self, response, request): + controller = request.environ.get('ec2.controller', None) + if controller: + controller = controller.__class__.__name__ + action = request.environ.get('ec2.action', None) + ctxt = request.environ.get('ec2.context', None) + seconds = 'X' + microseconds = 'X' + if ctxt: + delta = datetime.datetime.utcnow() - \ + ctxt.timestamp + seconds = delta.seconds + microseconds = delta.microseconds + LOG.info( + "%s.%ss %s %s %s %s:%s %s [%s] %s %s", + seconds, + microseconds, + request.remote_addr, + request.method, + request.path_info, + controller, + action, + response.status_int, + request.user_agent, + request.content_type, + response.content_type, + context=ctxt) class Lockout(wsgi.Middleware): @@ -98,7 +122,7 @@ class Lockout(wsgi.Middleware): failures_key = "authfailures-%s" % access_key failures = int(self.mc.get(failures_key) or 0) if failures >= FLAGS.lockout_attempts: - detail = "Too many failed authentications." + detail = _("Too many failed authentications.") raise webob.exc.HTTPForbidden(detail=detail) res = req.get_response(self.application) if res.status_int == 403: @@ -107,9 +131,9 @@ class Lockout(wsgi.Middleware): # NOTE(vish): To use incr, failures has to be a string. self.mc.set(failures_key, '1', time=FLAGS.lockout_window * 60) elif failures >= FLAGS.lockout_attempts: - _log.warn('Access key %s has had %d failed authentications' - ' and will be locked out for %d minutes.' % - (access_key, failures, FLAGS.lockout_minutes)) + LOG.warn(_('Access key %s has had %d failed authentications' + ' and will be locked out for %d minutes.'), + access_key, failures, FLAGS.lockout_minutes) self.mc.set(failures_key, str(failures), time=FLAGS.lockout_minutes * 60) return res @@ -142,8 +166,9 @@ class Authenticate(wsgi.Middleware): req.method, req.host, req.path) - except exception.Error, ex: - logging.debug(_("Authentication Failure: %s") % ex) + # Be explicit for what exceptions are 403, the rest bubble as 500 + except (exception.NotFound, exception.NotAuthorized) as ex: + LOG.audit(_("Authentication Failure: %s"), str(ex)) raise webob.exc.HTTPForbidden() # Authenticated! 
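A minimal sketch of how the refactored EC2 stack above can be composed now that the old API class is gone; this wiring is illustrative only (it is not part of this commit) and assumes the same wrapping order as the removed Authenticate(Router(Authorizer(Executor()))) chain, with Requestify taking the old Router's place:

    from nova.api import ec2

    # Innermost application does the actual work; each middleware wraps it.
    app = ec2.Executor()
    app = ec2.Authorizer(app)
    # Requestify imports its controller from a dotted path via
    # utils.import_class and builds an APIRequest for each request.
    app = ec2.Requestify(app, 'nova.api.ec2.cloud.CloudController')
    app = ec2.Authenticate(app)
    app = ec2.RequestLogging(app)
    # Optionally wrap with Lockout to throttle repeated auth failures.
    # app = ec2.Lockout(app)
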
@@ -154,29 +179,19 @@ class Authenticate(wsgi.Middleware): project=project, remote_address=remote_address) req.environ['ec2.context'] = ctxt + LOG.audit(_('Authenticated Request For %s:%s'), user.name, + project.name, context=req.environ['ec2.context']) return self.application -class Router(wsgi.Middleware): +class Requestify(wsgi.Middleware): - """Add ec2.'controller', .'action', and .'action_args' to WSGI environ.""" - - def __init__(self, application): - super(Router, self).__init__(application) - self.map = routes.Mapper() - self.map.connect("/{controller_name}/") - self.controllers = dict(Cloud=cloud.CloudController(), - Admin=admin.AdminController()) + def __init__(self, app, controller): + super(Requestify, self).__init__(app) + self.controller = utils.import_class(controller)() @webob.dec.wsgify def __call__(self, req): - # Obtain the appropriate controller and action for this request. - try: - match = self.map.match(req.path_info) - controller_name = match['controller_name'] - controller = self.controllers[controller_name] - except: - raise webob.exc.HTTPNotFound() non_args = ['Action', 'Signature', 'AWSAccessKeyId', 'SignatureMethod', 'SignatureVersion', 'Version', 'Timestamp'] args = dict(req.params) @@ -189,13 +204,13 @@ class Router(wsgi.Middleware): except: raise webob.exc.HTTPBadRequest() - _log.debug(_('action: %s') % action) + LOG.debug(_('action: %s'), action) for key, value in args.items(): - _log.debug(_('arg: %s\t\tval: %s') % (key, value)) + LOG.debug(_('arg: %s\t\tval: %s'), key, value) # Success! - req.environ['ec2.controller'] = controller - req.environ['ec2.action'] = action + api_request = apirequest.APIRequest(self.controller, action, args) + req.environ['ec2.request'] = api_request req.environ['ec2.action_args'] = args return self.application @@ -256,13 +271,14 @@ class Authorizer(wsgi.Middleware): @webob.dec.wsgify def __call__(self, req): context = req.environ['ec2.context'] - controller_name = req.environ['ec2.controller'].__class__.__name__ - action = req.environ['ec2.action'] - allowed_roles = self.action_roles[controller_name].get(action, - ['none']) + controller = req.environ['ec2.request'].controller.__class__.__name__ + action = req.environ['ec2.request'].action + allowed_roles = self.action_roles[controller].get(action, ['none']) if self._matches_any_role(context, allowed_roles): return self.application else: + LOG.audit(_("Unauthorized request for controller=%s " + "and action=%s"), controller, action, context=context) raise webob.exc.HTTPUnauthorized() def _matches_any_role(self, context, roles): @@ -289,23 +305,28 @@ class Executor(wsgi.Application): @webob.dec.wsgify def __call__(self, req): context = req.environ['ec2.context'] - controller = req.environ['ec2.controller'] - action = req.environ['ec2.action'] - args = req.environ['ec2.action_args'] - - api_request = apirequest.APIRequest(controller, action) + api_request = req.environ['ec2.request'] result = None try: - result = api_request.send(context, **args) + result = api_request.invoke(context) + except exception.NotFound as ex: + LOG.info(_('NotFound raised: %s'), str(ex), context=context) + return self._error(req, context, type(ex).__name__, str(ex)) except exception.ApiError as ex: - + LOG.exception(_('ApiError raised: %s'), str(ex), context=context) if ex.code: - return self._error(req, ex.code, ex.message) + return self._error(req, context, ex.code, str(ex)) else: - return self._error(req, type(ex).__name__, ex.message) - # TODO(vish): do something more useful with unknown exceptions + 
return self._error(req, context, type(ex).__name__, str(ex)) except Exception as ex: - return self._error(req, type(ex).__name__, str(ex)) + extra = {'environment': req.environ} + LOG.exception(_('Unexpected error raised: %s'), str(ex), + extra=extra, context=context) + return self._error(req, + context, + 'UnknownError', + _('An unknown error has occurred. ' + 'Please try your request again.')) else: resp = webob.Response() resp.status = 200 @@ -313,15 +334,16 @@ class Executor(wsgi.Application): resp.body = str(result) return resp - def _error(self, req, code, message): - logging.error("%s: %s", code, message) + def _error(self, req, context, code, message): + LOG.error("%s: %s", code, message, context=context) resp = webob.Response() resp.status = 400 resp.headers['Content-Type'] = 'text/xml' resp.body = str('<?xml version="1.0"?>\n' - '<Response><Errors><Error><Code>%s</Code>' - '<Message>%s</Message></Error></Errors>' - '<RequestID>?</RequestID></Response>' % (code, message)) + '<Response><Errors><Error><Code>%s</Code>' + '<Message>%s</Message></Error></Errors>' + '<RequestID>%s</RequestID></Response>' % + (code, message, context.request_id)) return resp @@ -343,29 +365,3 @@ class Versions(wsgi.Application): '2009-04-04', ] return ''.join('%s\n' % v for v in versions) - - -def authenticate_factory(global_args, **local_args): - def authenticator(app): - return Authenticate(app) - return authenticator - - -def router_factory(global_args, **local_args): - def router(app): - return Router(app) - return router - - -def authorizer_factory(global_args, **local_args): - def authorizer(app): - return Authorizer(app) - return authorizer - - -def executor_factory(global_args, **local_args): - return Executor() - - -def versions_factory(global_args, **local_args): - return Versions() diff --git a/nova/api/ec2/admin.py b/nova/api/ec2/admin.py index fac01369eca1..758b612e80fb 100644 --- a/nova/api/ec2/admin.py +++ b/nova/api/ec2/admin.py @@ -24,9 +24,13 @@ import base64 from nova import db from nova import exception +from nova import log as logging from nova.auth import manager +LOG = logging.getLogger('nova.api.ec2.admin') + + def user_dict(user, base64_file=None): """Convert the user object to a result dict""" if user: @@ -75,17 +79,18 @@ class AdminController(object): return {'userSet': [user_dict(u) for u in manager.AuthManager().get_users()]} - def register_user(self, _context, name, **_kwargs): + def register_user(self, context, name, **_kwargs): """Creates a new user, and returns generated credentials.""" + LOG.audit(_("Creating new user: %s"), name, context=context) return user_dict(manager.AuthManager().create_user(name)) - def deregister_user(self, _context, name, **_kwargs): + def deregister_user(self, context, name, **_kwargs): """Deletes a single user (NOT undoable.) Should throw an exception if the user has instances, volumes, or buckets remaining. 
""" + LOG.audit(_("Deleting user: %s"), name, context=context) manager.AuthManager().delete_user(name) - return True def describe_roles(self, context, project_roles=True, **kwargs): @@ -105,15 +110,27 @@ class AdminController(object): operation='add', **kwargs): """Add or remove a role for a user and project.""" if operation == 'add': + if project: + LOG.audit(_("Adding role %s to user %s for project %s"), role, + user, project, context=context) + else: + LOG.audit(_("Adding sitewide role %s to user %s"), role, user, + context=context) manager.AuthManager().add_role(user, role, project) elif operation == 'remove': + if project: + LOG.audit(_("Removing role %s from user %s for project %s"), + role, user, project, context=context) + else: + LOG.audit(_("Removing sitewide role %s from user %s"), role, + user, context=context) manager.AuthManager().remove_role(user, role, project) else: - raise exception.ApiError('operation must be add or remove') + raise exception.ApiError(_('operation must be add or remove')) return True - def generate_x509_for_user(self, _context, name, project=None, **kwargs): + def generate_x509_for_user(self, context, name, project=None, **kwargs): """Generates and returns an x509 certificate for a single user. Is usually called from a client that will wrap this with access and secret key info, and return a zip file. @@ -122,6 +139,8 @@ class AdminController(object): project = name project = manager.AuthManager().get_project(project) user = manager.AuthManager().get_user(name) + LOG.audit(_("Getting x509 for user: %s on project: %s"), name, + project, context=context) return user_dict(user, base64.b64encode(project.get_credentials(user))) def describe_project(self, context, name, **kwargs): @@ -137,6 +156,8 @@ class AdminController(object): def register_project(self, context, name, manager_user, description=None, member_users=None, **kwargs): """Creates a new project""" + LOG.audit(_("Create project %s managed by %s"), name, manager_user, + context=context) return project_dict( manager.AuthManager().create_project( name, @@ -146,6 +167,7 @@ class AdminController(object): def deregister_project(self, context, name): """Permanently deletes a project.""" + LOG.audit(_("Delete project: %s"), name, context=context) manager.AuthManager().delete_project(name) return True @@ -159,11 +181,15 @@ class AdminController(object): **kwargs): """Add or remove a user from a project.""" if operation == 'add': + LOG.audit(_("Adding user %s to project %s"), user, project, + context=context) manager.AuthManager().add_to_project(user, project) elif operation == 'remove': + LOG.audit(_("Removing user %s from project %s"), user, project, + context=context) manager.AuthManager().remove_from_project(user, project) else: - raise exception.ApiError('operation must be add or remove') + raise exception.ApiError(_('operation must be add or remove')) return True # FIXME(vish): these host commands don't work yet, perhaps some of the diff --git a/nova/api/ec2/apirequest.py b/nova/api/ec2/apirequest.py index a90fbeb0c2b5..78576470a568 100644 --- a/nova/api/ec2/apirequest.py +++ b/nova/api/ec2/apirequest.py @@ -20,13 +20,13 @@ APIRequest class """ -import logging import re # TODO(termie): replace minidom with etree from xml.dom import minidom -_log = logging.getLogger("api") -_log.setLevel(logging.DEBUG) +from nova import log as logging + +LOG = logging.getLogger("nova.api.request") _c2u = re.compile('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))') @@ -83,24 +83,25 @@ def _try_convert(value): class 
APIRequest(object): - def __init__(self, controller, action): + def __init__(self, controller, action, args): self.controller = controller self.action = action + self.args = args - def send(self, context, **kwargs): + def invoke(self, context): try: method = getattr(self.controller, _camelcase_to_underscore(self.action)) except AttributeError: _error = _('Unsupported API request: controller = %s, ' 'action = %s') % (self.controller, self.action) - _log.warning(_error) + LOG.exception(_error) # TODO: Raise custom exception, trap in apiserver, # and reraise as 400 error. raise Exception(_error) args = {} - for key, value in kwargs.items(): + for key, value in self.args.items(): parts = key.split(".") key = _camelcase_to_underscore(parts[0]) if isinstance(value, str) or isinstance(value, unicode): @@ -142,7 +143,7 @@ class APIRequest(object): response = xml.toxml() xml.unlink() - _log.debug(response) + LOG.debug(response) return response def _render_dict(self, xml, el, data): @@ -151,7 +152,7 @@ class APIRequest(object): val = data[key] el.appendChild(self._render_data(xml, key, val)) except: - _log.debug(data) + LOG.debug(data) raise def _render_data(self, xml, el_name, data): diff --git a/nova/api/ec2/cloud.py b/nova/api/ec2/cloud.py index 0c0027287228..57d41ed67be6 100644 --- a/nova/api/ec2/cloud.py +++ b/nova/api/ec2/cloud.py @@ -24,26 +24,28 @@ datastore. import base64 import datetime -import logging -import re -import os - -from nova import context import IPy +import os +import urllib from nova import compute +from nova import context + from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import network -from nova import rpc from nova import utils from nova import volume from nova.compute import instance_types FLAGS = flags.FLAGS +flags.DECLARE('service_down_time', 'nova.scheduler.driver') + +LOG = logging.getLogger("nova.api.cloud") InvalidInputException = exception.InvalidInputException @@ -72,17 +74,13 @@ def _gen_key(context, user_id, key_name): def ec2_id_to_id(ec2_id): - """Convert an ec2 ID (i-[base 36 number]) to an instance id (int)""" - return int(ec2_id[2:], 36) + """Convert an ec2 ID (i-[base 16 number]) to an instance id (int)""" + return int(ec2_id.split('-')[-1], 16) -def id_to_ec2_id(instance_id): - """Convert an instance ID (int) to an ec2 ID (i-[base 36 number])""" - digits = [] - while instance_id != 0: - instance_id, remainder = divmod(instance_id, 36) - digits.append('0123456789abcdefghijklmnopqrstuvwxyz'[remainder]) - return "i-%s" % ''.join(reversed(digits)) +def id_to_ec2_id(instance_id, template='i-%08x'): + """Convert an instance ID (int) to an ec2 ID (i-[base 16 number])""" + return template % instance_id class CloudController(object): @@ -94,8 +92,11 @@ class CloudController(object): self.image_service = utils.import_object(FLAGS.image_service) self.network_api = network.API() self.volume_api = volume.API() - self.compute_api = compute.API(self.image_service, self.network_api, - self.volume_api) + self.compute_api = compute.API( + network_api=self.network_api, + image_service=self.image_service, + volume_api=self.volume_api, + hostname_factory=id_to_ec2_id) self.setup() def __str__(self): @@ -131,14 +132,11 @@ class CloudController(object): result[key] = [line] return result - def _trigger_refresh_security_group(self, context, security_group): - nodes = set([instance['host'] for instance in security_group.instances - if instance['host'] is not None]) - for node in nodes: - 
rpc.cast(context, - '%s.%s' % (FLAGS.compute_topic, node), - {"method": "refresh_security_group", - "args": {"security_group_id": security_group.id}}) + def _get_availability_zone_by_host(self, context, host): + services = db.service_get_all_by_host(context, host) + if len(services) > 0: + return services[0]['availability_zone'] + return 'unknown zone' def get_metadata(self, address): ctxt = context.get_admin_context() @@ -152,6 +150,8 @@ class CloudController(object): else: keys = '' hostname = instance_ref['hostname'] + host = instance_ref['host'] + availability_zone = self._get_availability_zone_by_host(ctxt, host) floating_ip = db.instance_get_floating_address(ctxt, instance_ref['id']) ec2_id = id_to_ec2_id(instance_ref['id']) @@ -174,8 +174,7 @@ class CloudController(object): 'local-hostname': hostname, 'local-ipv4': address, 'kernel-id': instance_ref['kernel_id'], - # TODO(vish): real zone - 'placement': {'availability-zone': 'nova'}, + 'placement': {'availability-zone': availability_zone}, 'public-hostname': hostname, 'public-ipv4': floating_ip or '', 'public-keys': keys, @@ -199,15 +198,33 @@ class CloudController(object): return self._describe_availability_zones(context, **kwargs) def _describe_availability_zones(self, context, **kwargs): - return {'availabilityZoneInfo': [{'zoneName': 'nova', - 'zoneState': 'available'}]} + enabled_services = db.service_get_all(context) + disabled_services = db.service_get_all(context, True) + available_zones = [] + for zone in [service.availability_zone for service + in enabled_services]: + if not zone in available_zones: + available_zones.append(zone) + not_available_zones = [] + for zone in [service.availability_zone for service in disabled_services + if not service['availability_zone'] in available_zones]: + if not zone in not_available_zones: + not_available_zones.append(zone) + result = [] + for zone in available_zones: + result.append({'zoneName': zone, + 'zoneState': "available"}) + for zone in not_available_zones: + result.append({'zoneName': zone, + 'zoneState': "not available"}) + return {'availabilityZoneInfo': result} def _describe_availability_zones_verbose(self, context, **kwargs): rv = {'availabilityZoneInfo': [{'zoneName': 'nova', 'zoneState': 'available'}]} services = db.service_get_all(context) - now = db.get_time() + now = datetime.datetime.utcnow() hosts = [] for host in [service['host'] for service in services]: if not host in hosts: @@ -237,16 +254,17 @@ class CloudController(object): name, _sep, host = region.partition('=') endpoint = '%s://%s:%s%s' % (FLAGS.ec2_prefix, host, - FLAGS.cc_port, + FLAGS.ec2_port, FLAGS.ec2_suffix) regions.append({'regionName': name, 'regionEndpoint': endpoint}) else: regions = [{'regionName': 'nova', 'regionEndpoint': '%s://%s:%s%s' % (FLAGS.ec2_prefix, - FLAGS.cc_host, - FLAGS.cc_port, + FLAGS.ec2_host, + FLAGS.ec2_port, FLAGS.ec2_suffix)}] + return {'regionInfo': regions} def describe_snapshots(self, context, @@ -282,6 +300,7 @@ class CloudController(object): return {'keypairsSet': result} def create_key_pair(self, context, key_name, **kwargs): + LOG.audit(_("Create key pair %s"), key_name, context=context) data = _gen_key(context, context.user.id, key_name) return {'keyName': key_name, 'keyFingerprint': data['fingerprint'], @@ -289,6 +308,7 @@ class CloudController(object): # TODO(vish): when context is no longer an object, pass it here def delete_key_pair(self, context, key_name, **kwargs): + LOG.audit(_("Delete key pair %s"), key_name, context=context) try: db.key_pair_destroy(context, 
context.user.id, key_name) except exception.NotFound: @@ -349,6 +369,7 @@ class CloudController(object): values['group_id'] = source_security_group['id'] elif cidr_ip: # If this fails, it throws an exception. This is what we want. + cidr_ip = urllib.unquote(cidr_ip).decode() IPy.IP(cidr_ip) values['cidr'] = cidr_ip else: @@ -395,6 +416,8 @@ class CloudController(object): return False def revoke_security_group_ingress(self, context, group_name, **kwargs): + LOG.audit(_("Revoke security group ingress %s"), group_name, + context=context) self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, @@ -402,8 +425,8 @@ class CloudController(object): criteria = self._revoke_rule_args_to_dict(context, **kwargs) if criteria == None: - raise exception.ApiError(_("No rule for the specified " - "parameters.")) + raise exception.ApiError(_("Not enough parameters to build a " + "valid rule.")) for rule in security_group.rules: match = True @@ -412,7 +435,8 @@ class CloudController(object): match = False if match: db.security_group_rule_destroy(context, rule['id']) - self._trigger_refresh_security_group(context, security_group) + self.compute_api.trigger_security_group_rules_refresh(context, + security_group['id']) return True raise exception.ApiError(_("No rule for the specified parameters.")) @@ -421,12 +445,17 @@ class CloudController(object): # for these operations, so support for newer API versions # is sketchy. def authorize_security_group_ingress(self, context, group_name, **kwargs): + LOG.audit(_("Authorize security group ingress %s"), group_name, + context=context) self.compute_api.ensure_default_security_group(context) security_group = db.security_group_get_by_name(context, context.project_id, group_name) values = self._revoke_rule_args_to_dict(context, **kwargs) + if values is None: + raise exception.ApiError(_("Not enough parameters to build a " + "valid rule.")) values['parent_group_id'] = security_group.id if self._security_group_rule_exists(security_group, values): @@ -435,7 +464,8 @@ class CloudController(object): security_group_rule = db.security_group_rule_create(context, values) - self._trigger_refresh_security_group(context, security_group) + self.compute_api.trigger_security_group_rules_refresh(context, + security_group['id']) return True @@ -457,6 +487,7 @@ class CloudController(object): return source_project_id def create_security_group(self, context, group_name, group_description): + LOG.audit(_("Create Security Group %s"), group_name, context=context) self.compute_api.ensure_default_security_group(context) if db.security_group_exists(context, context.project_id, group_name): raise exception.ApiError(_('group %s already exists') % group_name) @@ -471,6 +502,7 @@ class CloudController(object): group_ref)]} def delete_security_group(self, context, group_name, **kwargs): + LOG.audit(_("Delete security group %s"), group_name, context=context) security_group = db.security_group_get_by_name(context, context.project_id, group_name) @@ -478,22 +510,26 @@ class CloudController(object): return True def get_console_output(self, context, instance_id, **kwargs): + LOG.audit(_("Get console output for instance %s"), instance_id, + context=context) # instance_id is passed in as a list of instances ec2_id = instance_id[0] instance_id = ec2_id_to_id(ec2_id) - instance_ref = self.compute_api.get(context, instance_id) - output = rpc.call(context, - '%s.%s' % (FLAGS.compute_topic, - instance_ref['host']), - {"method": 
"get_console_output", - "args": {"instance_id": instance_ref['id']}}) - + output = self.compute_api.get_console_output( + context, instance_id=instance_id) now = datetime.datetime.utcnow() return {"InstanceId": ec2_id, "Timestamp": now, "output": base64.b64encode(output)} + def get_ajax_console(self, context, instance_id, **kwargs): + ec2_id = instance_id[0] + internal_id = ec2_id_to_id(ec2_id) + return self.compute_api.get_ajax_console(context, internal_id) + def describe_volumes(self, context, volume_id=None, **kwargs): + if volume_id: + volume_id = [ec2_id_to_id(x) for x in volume_id] volumes = self.volume_api.get_all(context) # NOTE(vish): volume_id is an optional list of volume ids to filter by. volumes = [self._format_volume(context, v) for v in volumes @@ -509,7 +545,7 @@ class CloudController(object): instance_data = '%s[%s]' % (instance_ec2_id, volume['instance']['host']) v = {} - v['volumeId'] = volume['id'] + v['volumeId'] = id_to_ec2_id(volume['id'], 'vol-%08x') v['status'] = volume['status'] v['size'] = volume['size'] v['availabilityZone'] = volume['availability_zone'] @@ -527,7 +563,8 @@ class CloudController(object): 'device': volume['mountpoint'], 'instanceId': instance_ec2_id, 'status': 'attached', - 'volume_id': volume['ec2_id']}] + 'volumeId': id_to_ec2_id(volume['id'], + 'vol-%08x')}] else: v['attachmentSet'] = [{}] @@ -536,19 +573,22 @@ class CloudController(object): return v def create_volume(self, context, size, **kwargs): + LOG.audit(_("Create volume of %s GB"), size, context=context) volume = self.volume_api.create(context, size, kwargs.get('display_name'), kwargs.get('display_description')) # TODO(vish): Instance should be None at db layer instead of # trying to lazy load, but for now we turn it into # a dict to avoid an error. 
- return {'volumeSet': [self._format_volume(context, dict(volume_ref))]} + return {'volumeSet': [self._format_volume(context, dict(volume))]} def delete_volume(self, context, volume_id, **kwargs): - self.volume_api.delete(context, volume_id) + volume_id = ec2_id_to_id(volume_id) + self.volume_api.delete(context, volume_id=volume_id) return True def update_volume(self, context, volume_id, **kwargs): + volume_id = ec2_id_to_id(volume_id) updatable_fields = ['display_name', 'display_description'] changes = {} for field in updatable_fields: @@ -559,24 +599,33 @@ class CloudController(object): return True def attach_volume(self, context, volume_id, instance_id, device, **kwargs): - self.compute_api.attach_volume(context, instance_id, volume_id, device) + volume_id = ec2_id_to_id(volume_id) + instance_id = ec2_id_to_id(instance_id) + LOG.audit(_("Attach volume %s to instance %s at %s"), volume_id, + instance_id, device, context=context) + self.compute_api.attach_volume(context, + instance_id=instance_id, + volume_id=volume_id, + device=device) volume = self.volume_api.get(context, volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], - 'instanceId': instance_id, + 'instanceId': id_to_ec2_id(instance_id), 'requestId': context.request_id, 'status': volume['attach_status'], - 'volumeId': volume_id} + 'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')} def detach_volume(self, context, volume_id, **kwargs): + volume_id = ec2_id_to_id(volume_id) + LOG.audit(_("Detach volume %s"), volume_id, context=context) volume = self.volume_api.get(context, volume_id) - instance = self.compute_api.detach_volume(context, volume_id) + instance = self.compute_api.detach_volume(context, volume_id=volume_id) return {'attachTime': volume['attach_time'], 'device': volume['mountpoint'], 'instanceId': id_to_ec2_id(instance['id']), 'requestId': context.request_id, 'status': volume['attach_status'], - 'volumeId': volume_id} + 'volumeId': id_to_ec2_id(volume_id, 'vol-%08x')} def _convert_to_set(self, lst, label): if lst == None or lst == []: @@ -586,19 +635,32 @@ class CloudController(object): return [{label: x} for x in lst] def describe_instances(self, context, **kwargs): - return self._format_describe_instances(context) + return self._format_describe_instances(context, **kwargs) - def _format_describe_instances(self, context): - return {'reservationSet': self._format_instances(context)} + def describe_instances_v6(self, context, **kwargs): + kwargs['use_v6'] = True + return self._format_describe_instances(context, **kwargs) + + def _format_describe_instances(self, context, **kwargs): + return {'reservationSet': self._format_instances(context, **kwargs)} def _format_run_instances(self, context, reservation_id): - i = self._format_instances(context, reservation_id) + i = self._format_instances(context, reservation_id=reservation_id) assert len(i) == 1 return i[0] - def _format_instances(self, context, **kwargs): + def _format_instances(self, context, instance_id=None, **kwargs): + # TODO(termie): this method is poorly named as its name does not imply + # that it will be making a variety of database calls + # rather than simply formatting a bunch of instances that + # were handed to it reservations = {} - instances = self.compute_api.get_all(context, **kwargs) + # NOTE(vish): instance_id is an optional list of ids to filter by + if instance_id: + instance_id = [ec2_id_to_id(x) for x in instance_id] + instances = [self.compute_api.get(context, x) for x in instance_id] + else: + instances = 
self.compute_api.get_all(context, **kwargs) for instance in instances: if not context.user.is_admin(): if instance['image_id'] == FLAGS.vpn_image_id: @@ -618,10 +680,16 @@ class CloudController(object): if instance['fixed_ip']['floating_ips']: fixed = instance['fixed_ip'] floating_addr = fixed['floating_ips'][0]['address'] + if instance['fixed_ip']['network'] and 'use_v6' in kwargs: + i['dnsNameV6'] = utils.to_global_ipv6( + instance['fixed_ip']['network']['cidr_v6'], + instance['mac_address']) + i['privateDnsName'] = fixed_addr i['publicDnsName'] = floating_addr i['dnsName'] = i['publicDnsName'] or i['privateDnsName'] i['keyName'] = instance['key_name'] + if context.user.is_admin(): i['keyName'] = '%s (%s, %s)' % (i['keyName'], instance['project_id'], @@ -632,6 +700,9 @@ class CloudController(object): i['amiLaunchIndex'] = instance['launch_index'] i['displayName'] = instance['display_name'] i['displayDescription'] = instance['display_description'] + host = instance['host'] + zone = self._get_availability_zone_by_host(context, host) + i['placement'] = {'availabilityZone': zone} if instance['reservation_id'] not in reservations: r = {} r['reservationId'] = instance['reservation_id'] @@ -670,27 +741,35 @@ class CloudController(object): return {'addressesSet': addresses} def allocate_address(self, context, **kwargs): + LOG.audit(_("Allocate address"), context=context) public_ip = self.network_api.allocate_floating_ip(context) return {'addressSet': [{'publicIp': public_ip}]} def release_address(self, context, public_ip, **kwargs): + LOG.audit(_("Release address %s"), public_ip, context=context) self.network_api.release_floating_ip(context, public_ip) return {'releaseResponse': ["Address released."]} def associate_address(self, context, instance_id, public_ip, **kwargs): + LOG.audit(_("Associate address %s to instance %s"), public_ip, + instance_id, context=context) instance_id = ec2_id_to_id(instance_id) - self.compute_api.associate_floating_ip(context, instance_id, public_ip) + self.compute_api.associate_floating_ip(context, + instance_id=instance_id, + address=public_ip) return {'associateResponse': ["Address associated."]} def disassociate_address(self, context, public_ip, **kwargs): + LOG.audit(_("Disassociate address %s"), public_ip, context=context) self.network_api.disassociate_floating_ip(context, public_ip) return {'disassociateResponse': ["Address disassociated."]} def run_instances(self, context, **kwargs): max_count = int(kwargs.get('max_count', 1)) instances = self.compute_api.create(context, - instance_types.get_by_type(kwargs.get('instance_type', None)), - kwargs['image_id'], + instance_type=instance_types.get_by_type( + kwargs.get('instance_type', None)), + image_id=kwargs['image_id'], min_count=int(kwargs.get('min_count', max_count)), max_count=max_count, kernel_id=kwargs.get('kernel_id', None), @@ -701,37 +780,37 @@ class CloudController(object): user_data=kwargs.get('user_data'), security_group=kwargs.get('security_group'), availability_zone=kwargs.get('placement', {}).get( - 'AvailabilityZone'), - generate_hostname=id_to_ec2_id) + 'AvailabilityZone')) return self._format_run_instances(context, instances[0]['reservation_id']) def terminate_instances(self, context, instance_id, **kwargs): """Terminate each instance in instance_id, which is a list of ec2 ids. 
instance_id is a kwarg so its name cannot be modified.""" - logging.debug("Going to start terminating instances") + LOG.debug(_("Going to start terminating instances")) for ec2_id in instance_id: instance_id = ec2_id_to_id(ec2_id) - self.compute_api.delete(context, instance_id) + self.compute_api.delete(context, instance_id=instance_id) return True def reboot_instances(self, context, instance_id, **kwargs): """instance_id is a list of instance ids""" + LOG.audit(_("Reboot instance %r"), instance_id, context=context) for ec2_id in instance_id: instance_id = ec2_id_to_id(ec2_id) - self.compute_api.reboot(context, instance_id) + self.compute_api.reboot(context, instance_id=instance_id) return True def rescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" instance_id = ec2_id_to_id(instance_id) - self.compute_api.rescue(context, instance_id) + self.compute_api.rescue(context, instance_id=instance_id) return True def unrescue_instance(self, context, instance_id, **kwargs): """This is an extension to the normal ec2_api""" instance_id = ec2_id_to_id(instance_id) - self.compute_api.unrescue(context, instance_id) + self.compute_api.unrescue(context, instance_id=instance_id) return True def update_instance(self, context, ec2_id, **kwargs): @@ -742,7 +821,7 @@ class CloudController(object): changes[field] = kwargs[field] if changes: instance_id = ec2_id_to_id(ec2_id) - self.compute_api.update(context, instance_id, **kwargs) + self.compute_api.update(context, instance_id=instance_id, **kwargs) return True def describe_images(self, context, image_id=None, **kwargs): @@ -753,6 +832,7 @@ class CloudController(object): return {'imagesSet': images} def deregister_image(self, context, image_id, **kwargs): + LOG.audit(_("De-registering image %s"), image_id, context=context) self.image_service.deregister(context, image_id) return {'imageId': image_id} @@ -760,7 +840,8 @@ class CloudController(object): if image_location is None and 'name' in kwargs: image_location = kwargs['name'] image_id = self.image_service.register(context, image_location) - logging.debug("Registered %s as %s" % (image_location, image_id)) + LOG.audit(_("Registered image %s with id %s"), image_location, + image_id, context=context) return {'imageId': image_id} def describe_image_attribute(self, context, image_id, attribute, **kwargs): @@ -788,6 +869,7 @@ class CloudController(object): raise exception.ApiError(_('only group "all" is supported')) if not operation_type in ['add', 'remove']: raise exception.ApiError(_('operation_type must be add or remove')) + LOG.audit(_("Updating image %s publicity"), image_id, context=context) return self.image_service.modify(context, image_id, operation_type) def update_image(self, context, image_id, **kwargs): diff --git a/nova/api/ec2/metadatarequesthandler.py b/nova/api/ec2/metadatarequesthandler.py index a57a6698a5d3..6fb441656b47 100644 --- a/nova/api/ec2/metadatarequesthandler.py +++ b/nova/api/ec2/metadatarequesthandler.py @@ -18,19 +18,20 @@ """Metadata request handler.""" -import logging - import webob.dec import webob.exc +from nova import log as logging from nova import flags +from nova import wsgi from nova.api.ec2 import cloud +LOG = logging.getLogger('nova.api.ec2.metadata') FLAGS = flags.FLAGS -class MetadataRequestHandler(object): +class MetadataRequestHandler(wsgi.Application): """Serve metadata from the EC2 API.""" def print_data(self, data): @@ -72,14 +73,9 @@ class MetadataRequestHandler(object): remote_address = 
req.headers.get('X-Forwarded-For', remote_address) meta_data = cc.get_metadata(remote_address) if meta_data is None: - logging.error(_('Failed to get metadata for ip: %s') % - remote_address) + LOG.error(_('Failed to get metadata for ip: %s'), remote_address) raise webob.exc.HTTPNotFound() data = self.lookup(req.path_info, meta_data) if data is None: raise webob.exc.HTTPNotFound() return self.print_data(data) - - -def metadata_factory(global_args, **local_args): - return MetadataRequestHandler() diff --git a/nova/api/openstack/__init__.py b/nova/api/openstack/__init__.py index a1430caedf1d..f2caac483625 100644 --- a/nova/api/openstack/__init__.py +++ b/nova/api/openstack/__init__.py @@ -20,59 +20,41 @@ WSGI middleware for OpenStack API controllers. """ -import time - -import logging import routes -import traceback import webob.dec import webob.exc -import webob -from nova import context from nova import flags -from nova import utils +from nova import log as logging from nova import wsgi from nova.api.openstack import faults from nova.api.openstack import backup_schedules +from nova.api.openstack import consoles from nova.api.openstack import flavors from nova.api.openstack import images -from nova.api.openstack import ratelimiting from nova.api.openstack import servers -from nova.api.openstack import sharedipgroups +from nova.api.openstack import shared_ip_groups +LOG = logging.getLogger('nova.api.openstack') FLAGS = flags.FLAGS -flags.DEFINE_string('os_api_auth', - 'nova.api.openstack.auth.AuthMiddleware', - 'The auth mechanism to use for the OpenStack API implemenation') - -flags.DEFINE_string('os_api_ratelimiting', - 'nova.api.openstack.ratelimiting.RateLimitingMiddleware', - 'Default ratelimiting implementation for the Openstack API') - +flags.DEFINE_string('os_krm_mapping_file', + 'krm_mapping.json', + 'Location of OpenStack Flavor/OS:EC2 Kernel/Ramdisk/Machine JSON file.') flags.DEFINE_bool('allow_admin_api', False, 'When True, this API service will accept admin operations.') -class API(wsgi.Middleware): - """WSGI entry point for all OpenStack API requests.""" - - def __init__(self): - auth_middleware = utils.import_class(FLAGS.os_api_auth) - ratelimiting_middleware = \ - utils.import_class(FLAGS.os_api_ratelimiting) - app = auth_middleware(ratelimiting_middleware(APIRouter())) - super(API, self).__init__(app) +class FaultWrapper(wsgi.Middleware): + """Calls down the middleware stack, making exceptions into faults.""" @webob.dec.wsgify def __call__(self, req): try: return req.get_response(self.application) except Exception as ex: - logging.warn(_("Caught error: %s") % str(ex)) - logging.error(traceback.format_exc()) + LOG.exception(_("Caught error: %s"), str(ex)) exc = webob.exc.HTTPInternalServerError(explanation=str(ex)) return faults.Fault(exc) @@ -83,12 +65,17 @@ class APIRouter(wsgi.Router): and method. 
""" + @classmethod + def factory(cls, global_config, **local_config): + """Simple paste factory, :class:`nova.wsgi.Router` doesn't have one""" + return cls() + def __init__(self): mapper = routes.Mapper() server_members = {'action': 'POST'} if FLAGS.allow_admin_api: - logging.debug("Including admin operations in API.") + LOG.debug(_("Including admin operations in API.")) server_members['pause'] = 'POST' server_members['unpause'] = 'POST' server_members["diagnostics"] = "GET" @@ -105,12 +92,18 @@ class APIRouter(wsgi.Router): parent_resource=dict(member_name='server', collection_name='servers')) + mapper.resource("console", "consoles", + controller=consoles.Controller(), + parent_resource=dict(member_name='server', + collection_name='servers')) + mapper.resource("image", "images", controller=images.Controller(), collection={'detail': 'GET'}) mapper.resource("flavor", "flavors", controller=flavors.Controller(), collection={'detail': 'GET'}) - mapper.resource("sharedipgroup", "sharedipgroups", - controller=sharedipgroups.Controller()) + mapper.resource("shared_ip_group", "shared_ip_groups", + collection={'detail': 'GET'}, + controller=shared_ip_groups.Controller()) super(APIRouter, self).__init__(mapper) @@ -126,11 +119,3 @@ class Versions(wsgi.Application): "application/xml": { "attributes": dict(version=["status", "id"])}} return wsgi.Serializer(req.environ, metadata).to_content_type(response) - - -def router_factory(global_cof, **local_conf): - return APIRouter() - - -def versions_factory(global_conf, **local_conf): - return Versions() diff --git a/nova/api/openstack/auth.py b/nova/api/openstack/auth.py index 00e817c8de86..1dfdd5318c2b 100644 --- a/nova/api/openstack/auth.py +++ b/nova/api/openstack/auth.py @@ -134,9 +134,3 @@ class AuthMiddleware(wsgi.Middleware): token = self.db.auth_create_token(ctxt, token_dict) return token, user return None, None - - -def auth_factory(global_conf, **local_conf): - def auth(app): - return AuthMiddleware(app) - return auth diff --git a/nova/api/openstack/backup_schedules.py b/nova/api/openstack/backup_schedules.py index fcc07bdd37d5..197125d86941 100644 --- a/nova/api/openstack/backup_schedules.py +++ b/nova/api/openstack/backup_schedules.py @@ -15,7 +15,9 @@ # License for the specific language governing permissions and limitations # under the License. +import logging import time + from webob import exc from nova import wsgi @@ -46,8 +48,8 @@ class Controller(wsgi.Controller): def create(self, req, server_id): """ No actual update method required, since the existing API allows both create and update through a POST """ - return faults.Fault(exc.HTTPNotFound()) + return faults.Fault(exc.HTTPNotImplemented()) def delete(self, req, server_id, id): """ Deletes an existing backup schedule """ - return faults.Fault(exc.HTTPNotFound()) + return faults.Fault(exc.HTTPNotImplemented()) diff --git a/nova/api/openstack/common.py b/nova/api/openstack/common.py index ac0572c96747..037ed47a0d8b 100644 --- a/nova/api/openstack/common.py +++ b/nova/api/openstack/common.py @@ -15,6 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. +from nova import exception + def limited(items, req): """Return a slice of items according to requested offset and limit. @@ -34,3 +36,25 @@ def limited(items, req): limit = min(1000, limit) range_end = offset + limit return items[offset:range_end] + + +def get_image_id_from_image_hash(image_service, context, image_hash): + """Given an Image ID Hash, return an objectstore Image ID. 
+ + image_service - reference to objectstore compatible image service. + context - security context for image service requests. + image_hash - hash of the image ID. + """ + + # FIX(sandy): This is terribly inefficient. It pulls all images + # from objectstore in order to find the match. ObjectStore + # should have a numeric counterpart to the string ID. + try: + items = image_service.detail(context) + except NotImplementedError: + items = image_service.index(context) + for image in items: + image_id = image['imageId'] + if abs(hash(image_id)) == int(image_hash): + return image_id + raise exception.NotFound(image_hash) diff --git a/nova/api/openstack/consoles.py b/nova/api/openstack/consoles.py new file mode 100644 index 000000000000..9ebdbe710ad6 --- /dev/null +++ b/nova/api/openstack/consoles.py @@ -0,0 +1,96 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 OpenStack LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from webob import exc + +from nova import console +from nova import exception +from nova import wsgi +from nova.api.openstack import faults + + +def _translate_keys(cons): + """Coerces a console instance into proper dictionary format """ + pool = cons['pool'] + info = {'id': cons['id'], + 'console_type': pool['console_type']} + return dict(console=info) + + +def _translate_detail_keys(cons): + """Coerces a console instance into proper dictionary format with + correctly mapped attributes """ + pool = cons['pool'] + info = {'id': cons['id'], + 'console_type': pool['console_type'], + 'password': cons['password'], + 'port': cons['port'], + 'host': pool['public_hostname']} + return dict(console=info) + + +class Controller(wsgi.Controller): + """The Consoles Controller for the Openstack API""" + + _serialization_metadata = { + 'application/xml': { + 'attributes': { + 'console': []}}} + + def __init__(self): + self.console_api = console.API() + super(Controller, self).__init__() + + def index(self, req, server_id): + """Returns a list of consoles for this instance""" + consoles = self.console_api.get_consoles( + req.environ['nova.context'], + int(server_id)) + return dict(consoles=[_translate_keys(console) + for console in consoles]) + + def create(self, req, server_id): + """Creates a new console""" + #info = self._deserialize(req.body, req) + self.console_api.create_console( + req.environ['nova.context'], + int(server_id)) + + def show(self, req, server_id, id): + """Shows in-depth information on a specific console""" + try: + console = self.console_api.get_console( + req.environ['nova.context'], + int(server_id), + int(id)) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return _translate_detail_keys(console) + + def update(self, req, server_id, id): + """You can't update a console""" + raise faults.Fault(exc.HTTPNotImplemented()) + + def delete(self, req, server_id, id): + """Deletes a console""" + try: + self.console_api.delete_console(req.environ['nova.context'], + int(server_id), + int(id)) + except 
exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() diff --git a/nova/api/openstack/images.py b/nova/api/openstack/images.py index 0b239aab850d..9d56bc508afc 100644 --- a/nova/api/openstack/images.py +++ b/nova/api/openstack/images.py @@ -15,6 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. +import logging + from webob import exc from nova import compute @@ -26,6 +28,7 @@ from nova.api.openstack import common from nova.api.openstack import faults import nova.image.service + FLAGS = flags.FLAGS @@ -75,7 +78,14 @@ def _translate_status(item): 'decrypting': 'preparing', 'untarring': 'saving', 'available': 'active'} - item['status'] = status_mapping[item['status']] + try: + item['status'] = status_mapping[item['status']] + except KeyError: + # TODO(sirp): Performing translation of status (if necessary) here for + # now. Perhaps this should really be done in EC2 API and + # S3ImageService + pass + return item @@ -88,6 +98,14 @@ def _filter_keys(item, keys): return dict((k, v) for k, v in item.iteritems() if k in keys) +def _convert_image_id_to_hash(image): + if 'imageId' in image: + # Convert EC2-style ID (i-blah) to Rackspace-style (int) + image_id = abs(hash(image['imageId'])) + image['imageId'] = image_id + image['id'] = image_id + + class Controller(wsgi.Controller): _serialization_metadata = { @@ -112,6 +130,9 @@ class Controller(wsgi.Controller): items = self._service.detail(req.environ['nova.context']) except NotImplementedError: items = self._service.index(req.environ['nova.context']) + for image in items: + _convert_image_id_to_hash(image) + items = common.limited(items, req) items = [_translate_keys(item) for item in items] items = [_translate_status(item) for item in items] @@ -119,7 +140,12 @@ class Controller(wsgi.Controller): def show(self, req, id): """Return data about the given image id""" - return dict(image=self._service.show(req.environ['nova.context'], id)) + image_id = common.get_image_id_from_image_hash(self._service, + req.environ['nova.context'], id) + + image = self._service.show(req.environ['nova.context'], image_id) + _convert_image_id_to_hash(image) + return dict(image=image) def delete(self, req, id): # Only public images are supported for now. @@ -130,7 +156,11 @@ class Controller(wsgi.Controller): env = self._deserialize(req.body, req) instance_id = env["image"]["serverId"] name = env["image"]["name"] - return compute.API().snapshot(context, instance_id, name) + + image_meta = compute.API().snapshot( + context, instance_id, name) + + return dict(image=image_meta) def update(self, req, id): # Users may not modify public images, and that's all that diff --git a/nova/api/openstack/ratelimiting/__init__.py b/nova/api/openstack/ratelimiting/__init__.py index 81b83142ffc0..cbb4b897eb9a 100644 --- a/nova/api/openstack/ratelimiting/__init__.py +++ b/nova/api/openstack/ratelimiting/__init__.py @@ -219,9 +219,3 @@ class WSGIAppProxy(object): # No delay return None return float(resp.getheader('X-Wait-Seconds')) - - -def ratelimit_factory(global_conf, **local_conf): - def rl(app): - return RateLimitingMiddleware(app) - return rl diff --git a/nova/api/openstack/servers.py b/nova/api/openstack/servers.py index ce64ac7ad2fb..8cbcebed2ec6 100644 --- a/nova/api/openstack/servers.py +++ b/nova/api/openstack/servers.py @@ -15,14 +15,17 @@ # License for the specific language governing permissions and limitations # under the License. 
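The hash-based image IDs wired in above deserve one concrete round trip: the OpenStack API hands out abs(hash(<ec2-style id>)) as its integer ID, and get_image_id_from_image_hash() can only recover the original by rescanning every image. A minimal sketch of that contract, using a hypothetical 'ami-tiny' record (hash() of a string is Python-build-dependent and can collide, which is part of what the FIX(sandy) note in common.py is flagging):

    # loosely mirrors _convert_image_id_to_hash (images.py) and
    # get_image_id_from_image_hash (common.py); the image ids are made up
    images = [{'imageId': 'ami-tiny'}, {'imageId': 'ami-vpn'}]

    def lookup(image_hash):
        # the same linear scan the FIX(sandy) comment complains about
        for image in images:
            if abs(hash(image['imageId'])) == int(image_hash):
                return image['imageId']
        raise KeyError(image_hash)

    assert lookup(abs(hash('ami-tiny'))) == 'ami-tiny'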
-import logging +import json import traceback from webob import exc from nova import compute from nova import exception +from nova import flags +from nova import log as logging from nova import wsgi +from nova import utils from nova.api.openstack import common from nova.api.openstack import faults from nova.auth import manager as auth_manager @@ -35,6 +38,9 @@ LOG = logging.getLogger('server') LOG.setLevel(logging.DEBUG) +FLAGS = flags.FLAGS + + def _translate_detail_keys(inst): """ Coerces into dictionary format, mapping everything to Rackspace-like attributes for return""" @@ -44,7 +50,7 @@ def _translate_detail_keys(inst): power_state.RUNNING: 'active', power_state.BLOCKED: 'active', power_state.SUSPENDED: 'suspended', - power_state.PAUSED: 'error', + power_state.PAUSED: 'paused', power_state.SHUTDOWN: 'active', power_state.SHUTOFF: 'active', power_state.CRASHED: 'error'} @@ -81,6 +87,7 @@ class Controller(wsgi.Controller): def __init__(self): self.compute_api = compute.API() + self._image_service = utils.import_object(FLAGS.image_service) super(Controller, self).__init__() def index(self, req): @@ -117,6 +124,18 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPNotFound()) return exc.HTTPAccepted() + def _get_kernel_ramdisk_from_image(self, image_id): + mapping_filename = FLAGS.os_krm_mapping_file + + with open(mapping_filename) as f: + mapping = json.load(f) + if image_id in mapping: + return mapping[image_id] + + raise exception.NotFound( + _("No entry for image '%s' in mapping file '%s'") % + (image_id, mapping_filename)) + def create(self, req): """ Creates a new server for a given user """ env = self._deserialize(req.body, req) @@ -125,10 +144,15 @@ class Controller(wsgi.Controller): key_pair = auth_manager.AuthManager.get_key_pairs( req.environ['nova.context'])[0] + image_id = common.get_image_id_from_image_hash(self._image_service, + req.environ['nova.context'], env['server']['imageId']) + kernel_id, ramdisk_id = self._get_kernel_ramdisk_from_image(image_id) instances = self.compute_api.create( req.environ['nova.context'], instance_types.get_by_flavor_id(env['server']['flavorId']), - env['server']['imageId'], + image_id, + kernel_id=kernel_id, + ramdisk_id=ramdisk_id, display_name=env['server']['name'], display_description=env['server']['name'], key_name=key_pair['name'], @@ -141,15 +165,18 @@ class Controller(wsgi.Controller): if not inst_dict: return faults.Fault(exc.HTTPUnprocessableEntity()) + ctxt = req.environ['nova.context'] update_dict = {} if 'adminPass' in inst_dict['server']: update_dict['admin_pass'] = inst_dict['server']['adminPass'] + try: + self.compute_api.set_admin_password(ctxt, id) + except exception.TimeoutException, e: + return exc.HTTPRequestTimeout() if 'name' in inst_dict['server']: update_dict['display_name'] = inst_dict['server']['name'] - try: - self.compute_api.update(req.environ['nova.context'], id, - **update_dict) + self.compute_api.update(ctxt, id, **update_dict) except exception.NotFound: return faults.Fault(exc.HTTPNotFound()) return exc.HTTPNoContent() @@ -158,6 +185,7 @@ class Controller(wsgi.Controller): """ Multi-purpose method used to reboot, rebuild, and resize a server """ input_dict = self._deserialize(req.body, req) + #TODO(sandy): rebuild/resize not supported. 
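The os_krm_mapping_file consumed by _get_kernel_ramdisk_from_image above is plain JSON keyed by image ID, and because create() unpacks the result straight into kernel_id and ramdisk_id, each value must be a two-item list. A hedged sketch with made-up IDs:

    # hypothetical krm_mapping.json contents:
    #     {"12345": ["aki-0001", "ari-0001"]}
    import json

    with open('krm_mapping.json') as f:        # FLAGS.os_krm_mapping_file
        mapping = json.load(f)
    kernel_id, ramdisk_id = mapping['12345']   # a miss here raises KeyError;
                                               # the real method raises NotFound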
try: reboot_type = input_dict['reboot']['type'] except Exception: @@ -170,6 +198,50 @@ class Controller(wsgi.Controller): return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + def lock(self, req, id): + """ + lock the instance with id + admin only operation + + """ + context = req.environ['nova.context'] + try: + self.compute_api.lock(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::lock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def unlock(self, req, id): + """ + unlock the instance with id + admin only operation + + """ + context = req.environ['nova.context'] + try: + self.compute_api.unlock(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::unlock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + + def get_lock(self, req, id): + """ + return the boolean state of (instance with id)'s lock + + """ + context = req.environ['nova.context'] + try: + self.compute_api.get_lock(context, id) + except: + readable = traceback.format_exc() + LOG.exception(_("Compute.api::get_lock %s"), readable) + return faults.Fault(exc.HTTPUnprocessableEntity()) + return exc.HTTPAccepted() + def pause(self, req, id): """ Permit Admins to Pause the server. """ ctxt = req.environ['nova.context'] @@ -177,7 +249,7 @@ class Controller(wsgi.Controller): self.compute_api.pause(ctxt, id) except: readable = traceback.format_exc() - logging.error(_("Compute.api::pause %s"), readable) + LOG.exception(_("Compute.api::pause %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() @@ -188,7 +260,7 @@ class Controller(wsgi.Controller): self.compute_api.unpause(ctxt, id) except: readable = traceback.format_exc() - logging.error(_("Compute.api::unpause %s"), readable) + LOG.exception(_("Compute.api::unpause %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() @@ -199,7 +271,7 @@ class Controller(wsgi.Controller): self.compute_api.suspend(context, id) except: readable = traceback.format_exc() - logging.error(_("compute.api::suspend %s"), readable) + LOG.exception(_("compute.api::suspend %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() @@ -210,10 +282,19 @@ class Controller(wsgi.Controller): self.compute_api.resume(context, id) except: readable = traceback.format_exc() - logging.error(_("compute.api::resume %s"), readable) + LOG.exception(_("compute.api::resume %s"), readable) return faults.Fault(exc.HTTPUnprocessableEntity()) return exc.HTTPAccepted() + def get_ajax_console(self, req, id): + """ Returns a url to an instance's ajaxterm console. 
""" + try: + self.compute_api.get_ajax_console(req.environ['nova.context'], + int(id)) + except exception.NotFound: + return faults.Fault(exc.HTTPNotFound()) + return exc.HTTPAccepted() + def diagnostics(self, req, id): """Permit Admins to retrieve server diagnostics.""" ctxt = req.environ["nova.context"] @@ -222,4 +303,13 @@ class Controller(wsgi.Controller): def actions(self, req, id): """Permit Admins to retrieve server actions.""" ctxt = req.environ["nova.context"] - return self.compute_api.get_actions(ctxt, id) + items = self.compute_api.get_actions(ctxt, id) + actions = [] + # TODO(jk0): Do not do pre-serialization here once the default + # serializer is updated + for item in items: + actions.append(dict( + created_at=str(item.created_at), + action=item.action, + error=item.error)) + return dict(actions=actions) diff --git a/nova/api/openstack/sharedipgroups.py b/nova/api/openstack/shared_ip_groups.py similarity index 91% rename from nova/api/openstack/sharedipgroups.py rename to nova/api/openstack/shared_ip_groups.py index 845f5beada4f..bd3cc23a8e83 100644 --- a/nova/api/openstack/sharedipgroups.py +++ b/nova/api/openstack/shared_ip_groups.py @@ -15,6 +15,8 @@ # License for the specific language governing permissions and limitations # under the License. +import logging + from webob import exc from nova import wsgi @@ -29,7 +31,7 @@ def _translate_keys(inst): def _translate_detail_keys(inst): """ Coerces a shared IP group instance into proper dictionary format with correctly mapped attributes """ - return dict(sharedIpGroup=inst) + return dict(sharedIpGroups=inst) class Controller(wsgi.Controller): @@ -54,12 +56,12 @@ class Controller(wsgi.Controller): def delete(self, req, id): """ Deletes a Shared IP Group """ - raise faults.Fault(exc.HTTPNotFound()) + raise faults.Fault(exc.HTTPNotImplemented()) - def detail(self, req, id): + def detail(self, req): """ Returns a complete list of Shared IP Groups """ return _translate_detail_keys({}) def create(self, req): """ Creates a new Shared IP group """ - raise faults.Fault(exc.HTTPNotFound()) + raise faults.Fault(exc.HTTPNotImplemented()) diff --git a/nova/auth/dbdriver.py b/nova/auth/dbdriver.py index 47e435cb6801..0eb6fe58862a 100644 --- a/nova/auth/dbdriver.py +++ b/nova/auth/dbdriver.py @@ -20,7 +20,6 @@ Auth driver using the DB as its backend. """ -import logging import sys from nova import context diff --git a/nova/auth/ldapdriver.py b/nova/auth/ldapdriver.py index 7616ff112b26..bc53e0ec6a72 100644 --- a/nova/auth/ldapdriver.py +++ b/nova/auth/ldapdriver.py @@ -24,11 +24,11 @@ other backends by creating another class that exposes the same public methods. """ -import logging import sys from nova import exception from nova import flags +from nova import log as logging FLAGS = flags.FLAGS @@ -65,6 +65,8 @@ flags.DEFINE_string('ldap_netadmin', flags.DEFINE_string('ldap_developer', 'cn=developers,ou=Groups,dc=example,dc=com', 'cn for Developers') +LOG = logging.getLogger("nova.ldapdriver") + # TODO(vish): make an abstract base class with the same public methods # to define a set interface for AuthDrivers. 
I'm delaying @@ -117,8 +119,7 @@ class LdapDriver(object): def get_project(self, pid): """Retrieve project by id""" - dn = 'cn=%s,%s' % (pid, - FLAGS.ldap_project_subtree) + dn = self.__project_to_dn(pid) attr = self.__find_object(dn, LdapDriver.project_pattern) return self.__to_project(attr) @@ -226,7 +227,8 @@ class LdapDriver(object): ('description', [description]), (LdapDriver.project_attribute, [manager_dn]), ('member', members)] - self.conn.add_s('cn=%s,%s' % (name, FLAGS.ldap_project_subtree), attr) + dn = self.__project_to_dn(name, search=False) + self.conn.add_s(dn, attr) return self.__to_project(dict(attr)) def modify_project(self, project_id, manager_uid=None, description=None): @@ -244,23 +246,22 @@ class LdapDriver(object): manager_dn)) if description: attr.append((self.ldap.MOD_REPLACE, 'description', description)) - self.conn.modify_s('cn=%s,%s' % (project_id, - FLAGS.ldap_project_subtree), - attr) + dn = self.__project_to_dn(project_id) + self.conn.modify_s(dn, attr) def add_to_project(self, uid, project_id): """Add user to project""" - dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + dn = self.__project_to_dn(project_id) return self.__add_to_group(uid, dn) def remove_from_project(self, uid, project_id): """Remove user from project""" - dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + dn = self.__project_to_dn(project_id) return self.__remove_from_group(uid, dn) def is_in_project(self, uid, project_id): """Check if user is in project""" - dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + dn = self.__project_to_dn(project_id) return self.__is_in_group(uid, dn) def has_role(self, uid, role, project_id=None): @@ -300,7 +301,7 @@ class LdapDriver(object): roles.append(role) return roles else: - project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + project_dn = self.__project_to_dn(project_id) query = ('(&(&(objectclass=groupOfNames)(!%s))(member=%s))' % (LdapDriver.project_pattern, self.__uid_to_dn(uid))) roles = self.__find_objects(project_dn, query) @@ -333,7 +334,7 @@ class LdapDriver(object): def delete_project(self, project_id): """Delete a project""" - project_dn = 'cn=%s,%s' % (project_id, FLAGS.ldap_project_subtree) + project_dn = self.__project_to_dn(project_id) self.__delete_roles(project_dn) self.__delete_group(project_dn) @@ -365,9 +366,10 @@ class LdapDriver(object): def __get_ldap_user(self, uid): """Retrieve LDAP user entry by id""" - attr = self.__find_object(self.__uid_to_dn(uid), - '(objectclass=novaUser)') - return attr + dn = FLAGS.ldap_user_subtree + query = ('(&(%s=%s)(objectclass=novaUser))' % + (FLAGS.ldap_user_id_attribute, uid)) + return self.__find_object(dn, query) def __find_object(self, dn, query=None, scope=None): """Find an object by dn and query""" @@ -418,15 +420,13 @@ class LdapDriver(object): query = '(objectclass=groupOfNames)' return self.__find_object(dn, query) is not None - @staticmethod - def __role_to_dn(role, project_id=None): + def __role_to_dn(self, role, project_id=None): """Convert role to corresponding dn""" if project_id is None: return FLAGS.__getitem__("ldap_%s" % role).value else: - return 'cn=%s,cn=%s,%s' % (role, - project_id, - FLAGS.ldap_project_subtree) + project_dn = self.__project_to_dn(project_id) + return 'cn=%s,%s' % (role, project_dn) def __create_group(self, group_dn, name, uid, description, member_uids=None): @@ -502,8 +502,8 @@ class LdapDriver(object): try: self.conn.modify_s(group_dn, attr) except self.ldap.OBJECT_CLASS_VIOLATION: - logging.debug(_("Attempted 
to remove the last member of a group. " - "Deleting the group at %s instead."), group_dn) + LOG.debug(_("Attempted to remove the last member of a group. " + "Deleting the group at %s instead."), group_dn) self.__delete_group(group_dn) def __remove_from_all(self, uid): @@ -532,6 +532,42 @@ class LdapDriver(object): for role_dn in self.__find_role_dns(project_dn): self.__delete_group(role_dn) + def __to_project(self, attr): + """Convert ldap attributes to Project object""" + if attr is None: + return None + member_dns = attr.get('member', []) + return { + 'id': attr['cn'][0], + 'name': attr['cn'][0], + 'project_manager_id': + self.__dn_to_uid(attr[LdapDriver.project_attribute][0]), + 'description': attr.get('description', [None])[0], + 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} + + def __uid_to_dn(self, uid, search=True): + """Convert uid to dn""" + # By default return a generated DN + userdn = (FLAGS.ldap_user_id_attribute + '=%s,%s' + % (uid, FLAGS.ldap_user_subtree)) + if search: + query = ('%s=%s' % (FLAGS.ldap_user_id_attribute, uid)) + user = self.__find_dns(FLAGS.ldap_user_subtree, query) + if len(user) > 0: + userdn = user[0] + return userdn + + def __project_to_dn(self, pid, search=True): + """Convert pid to dn""" + # By default return a generated DN + projectdn = ('cn=%s,%s' % (pid, FLAGS.ldap_project_subtree)) + if search: + query = ('(&(cn=%s)%s)' % (pid, LdapDriver.project_pattern)) + project = self.__find_dns(FLAGS.ldap_project_subtree, query) + if len(project) > 0: + projectdn = project[0] + return projectdn + @staticmethod def __to_user(attr): """Convert ldap attributes to User object""" @@ -548,30 +584,11 @@ class LdapDriver(object): else: return None - def __to_project(self, attr): - """Convert ldap attributes to Project object""" - if attr is None: - return None - member_dns = attr.get('member', []) - return { - 'id': attr['cn'][0], - 'name': attr['cn'][0], - 'project_manager_id': - self.__dn_to_uid(attr[LdapDriver.project_attribute][0]), - 'description': attr.get('description', [None])[0], - 'member_ids': [self.__dn_to_uid(x) for x in member_dns]} - @staticmethod def __dn_to_uid(dn): """Convert user dn to uid""" return dn.split(',')[0].split('=')[1] - @staticmethod - def __uid_to_dn(uid): - """Convert uid to dn""" - return (FLAGS.ldap_user_id_attribute + '=%s,%s' - % (uid, FLAGS.ldap_user_subtree)) - class FakeLdapDriver(LdapDriver): """Fake Ldap Auth driver""" diff --git a/nova/auth/manager.py b/nova/auth/manager.py index d3e266952d6e..1652e24e1c9e 100644 --- a/nova/auth/manager.py +++ b/nova/auth/manager.py @@ -20,7 +20,6 @@ Nova authentication management """ -import logging import os import shutil import string # pylint: disable-msg=W0402 @@ -33,6 +32,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import signer @@ -70,6 +70,8 @@ flags.DEFINE_string('credential_rc_file', '%src', flags.DEFINE_string('auth_driver', 'nova.auth.dbdriver.DbDriver', 'Driver that auth manager uses') +LOG = logging.getLogger('nova.auth.manager') + class AuthBase(object): """Base class for objects relating to auth @@ -254,43 +256,51 @@ class AuthManager(object): # TODO(vish): check for valid timestamp (access_key, _sep, project_id) = access.partition(':') - logging.info(_('Looking up user: %r'), access_key) + LOG.debug(_('Looking up user: %r'), access_key) user = self.get_user_from_access_key(access_key) - logging.info('user: %r', user) + LOG.debug('user: %r', 
user) if user == None: + LOG.audit(_("Failed authorization for access key %s"), access_key) raise exception.NotFound(_('No user found for access key %s') % access_key) # NOTE(vish): if we stop using project name as id we need better # logic to find a default project for user if project_id == '': + LOG.debug(_("Using project name = user name (%s)"), user.name) project_id = user.name project = self.get_project(project_id) if project == None: + LOG.audit(_("failed authorization: no project named %s (user=%s)"), + project_id, user.name) raise exception.NotFound(_('No project called %s could be found') % project_id) if not self.is_admin(user) and not self.is_project_member(user, project): + LOG.audit(_("Failed authorization: user %s not admin and not " + "member of project %s"), user.name, project.name) raise exception.NotFound(_('User %s is not a member of project %s') % (user.id, project.id)) if check_type == 's3': sign = signer.Signer(user.secret.encode()) expected_signature = sign.s3_authorization(headers, verb, path) - logging.debug('user.secret: %s', user.secret) - logging.debug('expected_signature: %s', expected_signature) - logging.debug('signature: %s', signature) + LOG.debug('user.secret: %s', user.secret) + LOG.debug('expected_signature: %s', expected_signature) + LOG.debug('signature: %s', signature) if signature != expected_signature: + LOG.audit(_("Invalid signature for user %s"), user.name) raise exception.NotAuthorized(_('Signature does not match')) elif check_type == 'ec2': # NOTE(vish): hmac can't handle unicode, so encode ensures that # secret isn't unicode expected_signature = signer.Signer(user.secret.encode()).generate( params, verb, server_string, path) - logging.debug('user.secret: %s', user.secret) - logging.debug('expected_signature: %s', expected_signature) - logging.debug('signature: %s', signature) + LOG.debug('user.secret: %s', user.secret) + LOG.debug('expected_signature: %s', expected_signature) + LOG.debug('signature: %s', signature) if signature != expected_signature: + LOG.audit(_("Invalid signature for user %s"), user.name) raise exception.NotAuthorized(_('Signature does not match')) return (user, project) @@ -398,6 +408,12 @@ class AuthManager(object): raise exception.NotFound(_("The %s role can not be found") % role) if project is not None and role in FLAGS.global_roles: raise exception.NotFound(_("The %s role is global only") % role) + if project: + LOG.audit(_("Adding role %s to user %s in project %s"), role, + User.safe_id(user), Project.safe_id(project)) + else: + LOG.audit(_("Adding sitewide role %s to user %s"), role, + User.safe_id(user)) with self.driver() as drv: drv.add_role(User.safe_id(user), role, Project.safe_id(project)) @@ -418,6 +434,12 @@ class AuthManager(object): @type project: Project or project_id @param project: Project in which to remove local role. """ + if project: + LOG.audit(_("Removing role %s from user %s on project %s"), + role, User.safe_id(user), Project.safe_id(project)) + else: + LOG.audit(_("Removing sitewide role %s from user %s"), role, + User.safe_id(user)) with self.driver() as drv: drv.remove_role(User.safe_id(user), role, Project.safe_id(project)) @@ -480,6 +502,8 @@ class AuthManager(object): description, member_users) if project_dict: + LOG.audit(_("Created project %s with manager %s"), name, + manager_user) project = Project(**project_dict) return project @@ -496,6 +520,7 @@ class AuthManager(object): @param project: This will be the new description of the project. 
""" + LOG.audit(_("modifying project %s"), Project.safe_id(project)) if manager_user: manager_user = User.safe_id(manager_user) with self.driver() as drv: @@ -505,6 +530,8 @@ class AuthManager(object): def add_to_project(self, user, project): """Add user to project""" + LOG.audit(_("Adding user %s to project %s"), User.safe_id(user), + Project.safe_id(project)) with self.driver() as drv: return drv.add_to_project(User.safe_id(user), Project.safe_id(project)) @@ -523,6 +550,8 @@ class AuthManager(object): def remove_from_project(self, user, project): """Removes a user from a project""" + LOG.audit(_("Remove user %s from project %s"), User.safe_id(user), + Project.safe_id(project)) with self.driver() as drv: return drv.remove_from_project(User.safe_id(user), Project.safe_id(project)) @@ -549,6 +578,7 @@ class AuthManager(object): def delete_project(self, project): """Deletes a project""" + LOG.audit(_("Deleting project %s"), Project.safe_id(project)) with self.driver() as drv: drv.delete_project(Project.safe_id(project)) @@ -603,13 +633,16 @@ class AuthManager(object): with self.driver() as drv: user_dict = drv.create_user(name, access, secret, admin) if user_dict: - return User(**user_dict) + rv = User(**user_dict) + LOG.audit(_("Created user %s (admin: %r)"), rv.name, rv.admin) + return rv def delete_user(self, user): """Deletes a user Additionally deletes all users key_pairs""" uid = User.safe_id(user) + LOG.audit(_("Deleting user %s"), uid) db.key_pair_destroy_all_by_user(context.get_admin_context(), uid) with self.driver() as drv: @@ -618,6 +651,12 @@ class AuthManager(object): def modify_user(self, user, access_key=None, secret_key=None, admin=None): """Modify credentials for a user""" uid = User.safe_id(user) + if access_key: + LOG.audit(_("Access Key change for user %s"), uid) + if secret_key: + LOG.audit(_("Secret Key change for user %s"), uid) + if admin is not None: + LOG.audit(_("Admin status set to %r for user %s"), admin, uid) with self.driver() as drv: drv.modify_user(uid, access_key, secret_key, admin) @@ -643,10 +682,9 @@ class AuthManager(object): region, _sep, region_host = item.partition("=") regions[region] = region_host else: - regions = {'nova': FLAGS.cc_host} + regions = {'nova': FLAGS.ec2_host} for region, host in regions.iteritems(): - rc = self.__generate_rc(user.access, - user.secret, + rc = self.__generate_rc(user, pid, use_dmz, host) @@ -666,7 +704,7 @@ class AuthManager(object): port=vpn_port) zippy.writestr(FLAGS.credential_vpn_file, config) else: - logging.warn(_("No vpn data for project %s"), pid) + LOG.warn(_("No vpn data for project %s"), pid) zippy.writestr(FLAGS.ca_file, crypto.fetch_ca(pid)) zippy.close() @@ -683,30 +721,35 @@ class AuthManager(object): if project is None: project = user.id pid = Project.safe_id(project) - return self.__generate_rc(user.access, user.secret, pid, use_dmz) + return self.__generate_rc(user, pid, use_dmz) @staticmethod - def __generate_rc(access, secret, pid, use_dmz=True, host=None): + def __generate_rc(user, pid, use_dmz=True, host=None): """Generate rc file for user""" if use_dmz: - cc_host = FLAGS.cc_dmz + ec2_host = FLAGS.ec2_dmz_host else: - cc_host = FLAGS.cc_host + ec2_host = FLAGS.ec2_host # NOTE(vish): Always use the dmz since it is used from inside the # instance s3_host = FLAGS.s3_dmz if host: s3_host = host - cc_host = host + ec2_host = host rc = open(FLAGS.credentials_template).read() - rc = rc % {'access': access, + rc = rc % {'access': user.access, 'project': pid, - 'secret': secret, - 'ec2': '%s://%s:%s%s' 
% (FLAGS.ec2_prefix, - cc_host, - FLAGS.cc_port, - FLAGS.ec2_suffix), + 'secret': user.secret, + 'ec2': '%s://%s:%s%s' % (FLAGS.ec2_scheme, + ec2_host, + FLAGS.ec2_port, + FLAGS.ec2_path), 's3': 'http://%s:%s' % (s3_host, FLAGS.s3_port), + 'os': '%s://%s:%s%s' % (FLAGS.osapi_scheme, + ec2_host, + FLAGS.osapi_port, + FLAGS.osapi_path), + 'user': user.name, 'nova': FLAGS.ca_file, 'cert': FLAGS.credential_cert_file, 'key': FLAGS.credential_key_file} diff --git a/nova/auth/novarc.template b/nova/auth/novarc.template index 1b8ecb1734b5..c53a4acdc287 100644 --- a/nova/auth/novarc.template +++ b/nova/auth/novarc.template @@ -10,3 +10,7 @@ export NOVA_CERT=${NOVA_KEY_DIR}/%(nova)s export EUCALYPTUS_CERT=${NOVA_CERT} # euca-bundle-image seems to require this set alias ec2-bundle-image="ec2-bundle-image --cert ${EC2_CERT} --privatekey ${EC2_PRIVATE_KEY} --user 42 --ec2cert ${NOVA_CERT}" alias ec2-upload-bundle="ec2-upload-bundle -a ${EC2_ACCESS_KEY} -s ${EC2_SECRET_KEY} --url ${S3_URL} --ec2cert ${NOVA_CERT}" +export CLOUD_SERVERS_API_KEY="%(access)s" +export CLOUD_SERVERS_USERNAME="%(user)s" +export CLOUD_SERVERS_URL="%(os)s" + diff --git a/nova/auth/signer.py b/nova/auth/signer.py index f7d29f53431e..744e315d4877 100644 --- a/nova/auth/signer.py +++ b/nova/auth/signer.py @@ -46,7 +46,6 @@ Utility class for parsing signed AMI manifests. import base64 import hashlib import hmac -import logging import urllib # NOTE(vish): for new boto @@ -54,9 +53,13 @@ import boto # NOTE(vish): for old boto import boto.utils +from nova import log as logging from nova.exception import Error +LOG = logging.getLogger('nova.signer') + + class Signer(object): """Hacked up code from boto/connection.py""" @@ -120,7 +123,7 @@ class Signer(object): def _calc_signature_2(self, params, verb, server_string, path): """Generate AWS signature version 2 string.""" - logging.debug('using _calc_signature_2') + LOG.debug('using _calc_signature_2') string_to_sign = '%s\n%s\n%s\n' % (verb, server_string, path) if self.hmac_256: current_hmac = self.hmac_256 @@ -136,13 +139,13 @@ class Signer(object): val = urllib.quote(val, safe='-_~') pairs.append(urllib.quote(key, safe='') + '=' + val) qs = '&'.join(pairs) - logging.debug('query string: %s', qs) + LOG.debug('query string: %s', qs) string_to_sign += qs - logging.debug('string_to_sign: %s', string_to_sign) + LOG.debug('string_to_sign: %s', string_to_sign) current_hmac.update(string_to_sign) b64 = base64.b64encode(current_hmac.digest()) - logging.debug('len(b64)=%d', len(b64)) - logging.debug('base64 encoded digest: %s', b64) + LOG.debug('len(b64)=%d', len(b64)) + LOG.debug('base64 encoded digest: %s', b64) return b64 diff --git a/nova/cloudpipe/pipelib.py b/nova/cloudpipe/pipelib.py index 09361828d5d6..dc6f55af290c 100644 --- a/nova/cloudpipe/pipelib.py +++ b/nova/cloudpipe/pipelib.py @@ -22,7 +22,6 @@ an instance with it. 
""" -import logging import os import string import tempfile @@ -33,6 +32,7 @@ from nova import crypto from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import manager # TODO(eday): Eventually changes these to something not ec2-specific @@ -51,7 +51,7 @@ flags.DEFINE_string('dmz_mask', _('Netmask to push into openvpn config')) -LOG = logging.getLogger('nova-cloudpipe') +LOG = logging.getLogger('nova.cloudpipe') class CloudPipe(object): @@ -68,8 +68,8 @@ class CloudPipe(object): shellfile = open(FLAGS.boot_script_template, "r") s = string.Template(shellfile.read()) shellfile.close() - boot_script = s.substitute(cc_dmz=FLAGS.cc_dmz, - cc_port=FLAGS.cc_port, + boot_script = s.substitute(cc_dmz=FLAGS.ec2_dmz_host, + cc_port=FLAGS.ec2_port, dmz_net=FLAGS.dmz_net, dmz_mask=FLAGS.dmz_mask, num_vpn=FLAGS.cnt_vpn_clients) diff --git a/nova/compute/api.py b/nova/compute/api.py index 64d47b1ce093..a6b99c1cb81f 100644 --- a/nova/compute/api.py +++ b/nova/compute/api.py @@ -21,12 +21,13 @@ Handles all requests relating to instances (guest vms). """ import datetime -import logging +import re import time from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import network from nova import quota from nova import rpc @@ -36,6 +37,7 @@ from nova.compute import instance_types from nova.db import base FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.compute.api') def generate_default_hostname(instance_id): @@ -46,7 +48,8 @@ def generate_default_hostname(instance_id): class API(base.Base): """API for interacting with the compute manager.""" - def __init__(self, image_service=None, network_api=None, volume_api=None, + def __init__(self, image_service=None, network_api=None, + volume_api=None, hostname_factory=generate_default_hostname, **kwargs): if not image_service: image_service = utils.import_object(FLAGS.image_service) @@ -57,19 +60,21 @@ class API(base.Base): if not volume_api: volume_api = volume.API() self.volume_api = volume_api + self.hostname_factory = hostname_factory super(API, self).__init__(**kwargs) def get_network_topic(self, context, instance_id): + """Get the network topic for an instance.""" try: instance = self.get(context, instance_id) except exception.NotFound as e: - logging.warning("Instance %d was not found in get_network_topic", - instance_id) + LOG.warning(_("Instance %d was not found in get_network_topic"), + instance_id) raise e host = instance['host'] if not host: - raise exception.Error("Instance %d has no host" % instance_id) + raise exception.Error(_("Instance %d has no host") % instance_id) topic = self.db.queue_get_for(context, FLAGS.compute_topic, host) return rpc.call(context, topic, @@ -80,18 +85,17 @@ class API(base.Base): min_count=1, max_count=1, display_name='', display_description='', key_name=None, key_data=None, security_group='default', - availability_zone=None, user_data=None, - generate_hostname=generate_default_hostname): + availability_zone=None, user_data=None): """Create the number of instances requested if quota and other arguments check out ok.""" type_data = instance_types.INSTANCE_TYPES[instance_type] num_instances = quota.allowed_instances(context, max_count, type_data) if num_instances < min_count: - logging.warn("Quota exceeeded for %s, tried to run %s instances", - context.project_id, min_count) - raise quota.QuotaError("Instance quota exceeded. You can only " - "run %s more instances of this type." 
% + LOG.warn(_("Quota exceeeded for %s, tried to run %s instances"), + context.project_id, min_count) + raise quota.QuotaError(_("Instance quota exceeded. You can only " + "run %s more instances of this type.") % num_instances, "InstanceLimitExceeded") is_vpn = image_id == FLAGS.vpn_image_id @@ -105,8 +109,10 @@ class API(base.Base): if kernel_id == str(FLAGS.null_kernel): kernel_id = None ramdisk_id = None - logging.debug("Creating a raw instance") + LOG.debug(_("Creating a raw instance")) # Make sure we have access to kernel and ramdisk (if not raw) + logging.debug("Using Kernel=%s, Ramdisk=%s" % + (kernel_id, ramdisk_id)) if kernel_id: self.image_service.show(context, kernel_id) if ramdisk_id: @@ -147,11 +153,12 @@ class API(base.Base): 'user_data': user_data or '', 'key_name': key_name, 'key_data': key_data, + 'locked': False, 'availability_zone': availability_zone} elevated = context.elevated() instances = [] - logging.debug(_("Going to run %s instances..."), num_instances) + LOG.debug(_("Going to run %s instances..."), num_instances) for num in range(num_instances): instance = dict(mac_address=utils.generate_mac(), launch_index=num, @@ -168,22 +175,27 @@ class API(base.Base): security_group_id) # Set sane defaults if not specified - updates = dict(hostname=generate_hostname(instance_id)) - if 'display_name' not in instance: + updates = dict(hostname=self.hostname_factory(instance_id)) + if (not hasattr(instance, 'display_name') or + instance.display_name == None): updates['display_name'] = "Server %s" % instance_id instance = self.update(context, instance_id, **updates) instances.append(instance) - logging.debug(_("Casting to scheduler for %s/%s's instance %s"), + LOG.debug(_("Casting to scheduler for %s/%s's instance %s"), context.project_id, context.user_id, instance_id) rpc.cast(context, FLAGS.scheduler_topic, {"method": "run_instance", "args": {"topic": FLAGS.compute_topic, - "instance_id": instance_id}}) + "instance_id": instance_id, + "availability_zone": availability_zone}}) - return instances + for group_id in security_groups: + self.trigger_security_group_members_refresh(elevated, group_id) + + return [dict(x.iteritems()) for x in instances] def ensure_default_security_group(self, context): """ Create security group for the security context if it @@ -202,6 +214,60 @@ class API(base.Base): 'project_id': context.project_id} db.security_group_create(context, values) + def trigger_security_group_rules_refresh(self, context, security_group_id): + """Called when a rule is added to or removed from a security_group""" + + security_group = self.db.security_group_get(context, security_group_id) + + hosts = set() + for instance in security_group['instances']: + if instance['host'] is not None: + hosts.add(instance['host']) + + for host in hosts: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group_rules", + "args": {"security_group_id": security_group.id}}) + + def trigger_security_group_members_refresh(self, context, group_id): + """Called when a security group gains a new or loses a member + + Sends an update request to each compute node for whom this is + relevant.""" + + # First, we get the security group rules that reference this group as + # the grantee.. + security_group_rules = \ + self.db.security_group_rule_get_by_security_group_grantee( + context, + group_id) + + # ..then we distill the security groups to which they belong.. 
+ security_groups = set() + for rule in security_group_rules: + security_groups.add(rule['parent_group_id']) + + # ..then we find the instances that are members of these groups.. + instances = set() + for security_group in security_groups: + for instance in security_group['instances']: + instances.add(instance['id']) + + # ...then we find the hosts where they live... + hosts = set() + for instance in instances: + if instance['host']: + hosts.add(instance['host']) + + # ...and finally we tell these nodes to refresh their view of this + # particular security group. + for host in hosts: + rpc.cast(context, + self.db.queue_get_for(context, FLAGS.compute_topic, host), + {"method": "refresh_security_group_members", + "args": {"security_group_id": group_id}}) + def update(self, context, instance_id, **kwargs): """Updates the instance in the datastore. @@ -214,20 +280,21 @@ class API(base.Base): :retval None """ - return self.db.instance_update(context, instance_id, kwargs) + rv = self.db.instance_update(context, instance_id, kwargs) + return dict(rv.iteritems()) def delete(self, context, instance_id): - logging.debug("Going to try and terminate %s" % instance_id) + LOG.debug(_("Going to try to terminate %s"), instance_id) try: instance = self.get(context, instance_id) except exception.NotFound as e: - logging.warning(_("Instance %s was not found during terminate"), - instance_id) + LOG.warning(_("Instance %d was not found during terminate"), + instance_id) raise e if (instance['state_description'] == 'terminating'): - logging.warning(_("Instance %s is already being terminated"), - instance_id) + LOG.warning(_("Instance %d is already being terminated"), + instance_id) return self.update(context, @@ -238,16 +305,15 @@ class API(base.Base): host = instance['host'] if host: - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "terminate_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('terminate_instance', context, + instance_id, host) else: self.db.instance_destroy(context, instance_id) def get(self, context, instance_id): """Get a single instance with the given ID.""" - return self.db.instance_get_by_id(context, instance_id) + rv = self.db.instance_get_by_id(context, instance_id) + return dict(rv.iteritems()) def get_all(self, context, project_id=None, reservation_id=None, fixed_ip=None): @@ -256,7 +322,7 @@ class API(base.Base): an admin, it will retreive all instances in the system.""" if reservation_id is not None: return self.db.instance_get_all_by_reservation(context, - reservation_id) + reservation_id) if fixed_ip is not None: return self.db.fixed_ip_get_instance(context, fixed_ip) if project_id or not context.is_admin: @@ -269,50 +335,74 @@ class API(base.Base): project_id) return self.db.instance_get_all(context) + def _cast_compute_message(self, method, context, instance_id, host=None, + params=None): + """Generic handler for RPC casts to compute. + + :param params: Optional dictionary of arguments to be passed to the + compute worker + + :retval None + """ + if not params: + params = {} + if not host: + instance = self.get(context, instance_id) + host = instance['host'] + queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) + params['instance_id'] = instance_id + kwargs = {'method': method, 'args': params} + rpc.cast(context, queue, kwargs) + + def _call_compute_message(self, method, context, instance_id, host=None, + params=None): + """Generic handler for RPC calls to compute. 
+ + :param params: Optional dictionary of arguments to be passed to the + compute worker + + :retval: Result returned by compute worker + """ + if not params: + params = {} + if not host: + instance = self.get(context, instance_id) + host = instance["host"] + queue = self.db.queue_get_for(context, FLAGS.compute_topic, host) + params['instance_id'] = instance_id + kwargs = {'method': method, 'args': params} + return rpc.call(context, queue, kwargs) + def snapshot(self, context, instance_id, name): - """Snapshot the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "snapshot_instance", - "args": {"instance_id": instance_id, "name": name}}) + """Snapshot the given instance. + + :retval: A dict containing image metadata + """ + data = {'name': name, 'is_public': False} + image_meta = self.image_service.create(context, data) + params = {'image_id': image_meta['id']} + self._cast_compute_message('snapshot_instance', context, instance_id, + params=params) + return image_meta def reboot(self, context, instance_id): """Reboot the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "reboot_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('reboot_instance', context, instance_id) def pause(self, context, instance_id): """Pause the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "pause_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('pause_instance', context, instance_id) def unpause(self, context, instance_id): """Unpause the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unpause_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('unpause_instance', context, instance_id) def get_diagnostics(self, context, instance_id): """Retrieve diagnostics for the given instance.""" - instance = self.get(context, instance_id) - host = instance["host"] - return rpc.call(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "get_diagnostics", - "args": {"instance_id": instance_id}}) + return self._call_compute_message( + "get_diagnostics", + context, + instance_id) def get_actions(self, context, instance_id): """Retrieve actions for the given instance.""" @@ -320,39 +410,55 @@ class API(base.Base): def suspend(self, context, instance_id): """suspend the instance with instance_id""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "suspend_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('suspend_instance', context, instance_id) def resume(self, context, instance_id): """resume the instance with instance_id""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "resume_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('resume_instance', context, instance_id) def rescue(self, context, 
instance_id): """Rescue the given instance.""" - instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "rescue_instance", - "args": {"instance_id": instance_id}}) + self._cast_compute_message('rescue_instance', context, instance_id) def unrescue(self, context, instance_id): """Unrescue the given instance.""" + self._cast_compute_message('unrescue_instance', context, instance_id) + + def set_admin_password(self, context, instance_id): + """Set the root/admin password for the given instance.""" + self._cast_compute_message('set_admin_password', context, instance_id) + + def get_ajax_console(self, context, instance_id): + """Get a url to an AJAX Console""" instance = self.get(context, instance_id) - host = instance['host'] - rpc.cast(context, - self.db.queue_get_for(context, FLAGS.compute_topic, host), - {"method": "unrescue_instance", - "args": {"instance_id": instance_id}}) + output = self._call_compute_message('get_ajax_console', + context, + instance_id) + rpc.cast(context, '%s' % FLAGS.ajax_console_proxy_topic, + {'method': 'authorize_ajax_console', + 'args': {'token': output['token'], 'host': output['host'], + 'port': output['port']}}) + return {'url': '%s?token=%s' % (FLAGS.ajax_console_proxy_url, + output['token'])} + + def get_console_output(self, context, instance_id): + """Get console output for an an instance""" + return self._call_compute_message('get_console_output', + context, + instance_id) + + def lock(self, context, instance_id): + """lock the instance with instance_id""" + self._cast_compute_message('lock_instance', context, instance_id) + + def unlock(self, context, instance_id): + """unlock the instance with instance_id""" + self._cast_compute_message('unlock_instance', context, instance_id) + + def get_lock(self, context, instance_id): + """return the boolean state of (instance with instance_id)'s lock""" + instance = self.get(context, instance_id) + return instance['locked'] def attach_volume(self, context, instance_id, volume_id, device): if not re.match("^/dev/[a-z]d[a-z]+$", device): diff --git a/nova/compute/disk.py b/nova/compute/disk.py deleted file mode 100644 index 814a258cd61e..000000000000 --- a/nova/compute/disk.py +++ /dev/null @@ -1,204 +0,0 @@ -# vim: tabstop=4 shiftwidth=4 softtabstop=4 - -# Copyright 2010 United States Government as represented by the -# Administrator of the National Aeronautics and Space Administration. -# All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. -""" -Utility methods to resize, repartition, and modify disk images. - -Includes injection of SSH PGP keys into authorized_keys file. 
- -""" - -import logging -import os -import tempfile - -from nova import exception -from nova import flags - - -FLAGS = flags.FLAGS -flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10, - 'minimum size in bytes of root partition') -flags.DEFINE_integer('block_size', 1024 * 1024 * 256, - 'block_size to use for dd') - - -def partition(infile, outfile, local_bytes=0, resize=True, - local_type='ext2', execute=None): - """ - Turns a partition (infile) into a bootable drive image (outfile). - - The first 63 sectors (0-62) of the resulting image is a master boot record. - Infile becomes the first primary partition. - If local bytes is specified, a second primary partition is created and - formatted as ext2. - - :: - - In the diagram below, dashes represent drive sectors. - +-----+------. . .-------+------. . .------+ - | 0 a| b c|d e| - +-----+------. . .-------+------. . .------+ - | mbr | primary partiton | local partition | - +-----+------. . .-------+------. . .------+ - - """ - sector_size = 512 - file_size = os.path.getsize(infile) - if resize and file_size < FLAGS.minimum_root_size: - last_sector = FLAGS.minimum_root_size / sector_size - 1 - execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' - % (infile, last_sector, sector_size)) - execute('e2fsck -fp %s' % infile, check_exit_code=False) - execute('resize2fs %s' % infile) - file_size = FLAGS.minimum_root_size - elif file_size % sector_size != 0: - logging.warn(_("Input partition size not evenly divisible by" - " sector size: %d / %d"), file_size, sector_size) - primary_sectors = file_size / sector_size - if local_bytes % sector_size != 0: - logging.warn(_("Bytes for local storage not evenly divisible" - " by sector size: %d / %d"), local_bytes, sector_size) - local_sectors = local_bytes / sector_size - - mbr_last = 62 # a - primary_first = mbr_last + 1 # b - primary_last = primary_first + primary_sectors - 1 # c - local_first = primary_last + 1 # d - local_last = local_first + local_sectors - 1 # e - last_sector = local_last # e - - # create an empty file - execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' - % (outfile, mbr_last, sector_size)) - - # make mbr partition - execute('parted --script %s mklabel msdos' % outfile) - - # append primary file - execute('dd if=%s of=%s bs=%s conv=notrunc,fsync oflag=append' - % (infile, outfile, FLAGS.block_size)) - - # make primary partition - execute('parted --script %s mkpart primary %ds %ds' - % (outfile, primary_first, primary_last)) - - if local_bytes > 0: - # make the file bigger - execute('dd if=/dev/zero of=%s count=1 seek=%d bs=%d' - % (outfile, last_sector, sector_size)) - # make and format local partition - execute('parted --script %s mkpartfs primary %s %ds %ds' - % (outfile, local_type, local_first, local_last)) - - -def extend(image, size, execute): - file_size = os.path.getsize(image) - if file_size >= size: - return - return execute('truncate -s size %s' % (image,)) - - -def inject_data(image, key=None, net=None, partition=None, execute=None): - """Injects a ssh key and optionally net data into a disk image. - - it will mount the image as a fully partitioned disk and attempt to inject - into the specified partition number. - - If partition is not specified it mounts the image as a single partition. 
- - """ - out, err = execute('sudo losetup --find --show %s' % image) - if err: - raise exception.Error(_('Could not attach image to loopback: %s') - % err) - device = out.strip() - try: - if not partition is None: - # create partition - out, err = execute('sudo kpartx -a %s' % device) - if err: - raise exception.Error(_('Failed to load partition: %s') % err) - mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], - partition) - else: - mapped_device = device - - # We can only loopback mount raw images. If the device isn't there, - # it's normally because it's a .vmdk or a .vdi etc - if not os.path.exists(mapped_device): - raise exception.Error('Mapped device was not found (we can' - ' only inject raw disk images): %s' % - mapped_device) - - # Configure ext2fs so that it doesn't auto-check every N boots - out, err = execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) - - tmpdir = tempfile.mkdtemp() - try: - # mount loopback to dir - out, err = execute( - 'sudo mount %s %s' % (mapped_device, tmpdir)) - if err: - raise exception.Error(_('Failed to mount filesystem: %s') - % err) - - try: - if key: - # inject key file - _inject_key_into_fs(key, tmpdir, execute=execute) - if net: - _inject_net_into_fs(net, tmpdir, execute=execute) - finally: - # unmount device - execute('sudo umount %s' % mapped_device) - finally: - # remove temporary directory - execute('rmdir %s' % tmpdir) - if not partition is None: - # remove partitions - execute('sudo kpartx -d %s' % device) - finally: - # remove loopback - execute('sudo losetup --detach %s' % device) - - -def _inject_key_into_fs(key, fs, execute=None): - """Add the given public ssh key to root's authorized_keys. - - key is an ssh key string. - fs is the path to the base of the filesystem into which to inject the key. - """ - sshdir = os.path.join(fs, 'root', '.ssh') - execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter - execute('sudo chown root %s' % sshdir) - execute('sudo chmod 700 %s' % sshdir) - keyfile = os.path.join(sshdir, 'authorized_keys') - execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') - - -def _inject_net_into_fs(net, fs, execute=None): - """Inject /etc/network/interfaces into the filesystem rooted at fs. - - net is the contents of /etc/network/interfaces. - """ - netdir = os.path.join(os.path.join(fs, 'etc'), 'network') - execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter - execute('sudo chown root:root %s' % netdir) - execute('sudo chmod 755 %s' % netdir) - netfile = os.path.join(netdir, 'interfaces') - execute('sudo tee %s' % netfile, net) diff --git a/nova/compute/manager.py b/nova/compute/manager.py index ca6065890d89..6f09ce674a51 100644 --- a/nova/compute/manager.py +++ b/nova/compute/manager.py @@ -35,10 +35,15 @@ terminating it. 
""" import datetime +import random +import string import logging +import socket +import functools from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils @@ -51,6 +56,47 @@ flags.DEFINE_string('compute_driver', 'nova.virt.connection.get_connection', 'Driver to use for controlling virtualization') flags.DEFINE_string('stub_network', False, 'Stub network related code') +flags.DEFINE_integer('password_length', 12, + 'Length of generated admin passwords') +flags.DEFINE_string('console_host', socket.gethostname(), + 'Console proxy host to use to connect to instances on' + 'this host.') + +LOG = logging.getLogger('nova.compute.manager') + + +def checks_instance_lock(function): + """ + decorator used for preventing action against locked instances + unless, of course, you happen to be admin + + """ + + @functools.wraps(function) + def decorated_function(self, context, instance_id, *args, **kwargs): + + LOG.info(_("check_instance_lock: decorating: |%s|"), function, + context=context) + LOG.info(_("check_instance_lock: arguments: |%s| |%s| |%s|"), + self, context, instance_id, context=context) + locked = self.get_lock(context, instance_id) + admin = context.is_admin + LOG.info(_("check_instance_lock: locked: |%s|"), locked, + context=context) + LOG.info(_("check_instance_lock: admin: |%s|"), admin, + context=context) + + # if admin or unlocked call function otherwise log error + if admin or not locked: + LOG.info(_("check_instance_lock: executing: |%s|"), function, + context=context) + function(self, context, instance_id, *args, **kwargs) + else: + LOG.error(_("check_instance_lock: not executing |%s|"), + function, context=context) + return False + + return decorated_function class ComputeManager(manager.Manager): @@ -85,6 +131,15 @@ class ComputeManager(manager.Manager): state = power_state.NOSTATE self.db.instance_set_state(context, instance_id, state) + def get_console_topic(self, context, **_kwargs): + """Retrieves the console host for a project on this host + Currently this is just set in the flags for each compute + host.""" + #TODO(mdragon): perhaps make this variable by console_type? + return self.db.queue_get_for(context, + FLAGS.console_topic, + FLAGS.console_host) + def get_network_topic(self, context, **_kwargs): """Retrieves the network host for a project on this host""" # TODO(vish): This method should be memoized. 
This will make @@ -99,10 +154,20 @@ class ComputeManager(manager.Manager): FLAGS.network_topic, host) + def get_console_pool_info(self, context, console_type): + return self.driver.get_console_pool_info(console_type) + @exception.wrap_exception - def refresh_security_group(self, context, security_group_id, **_kwargs): - """This call passes stright through to the virtualization driver.""" - self.driver.refresh_security_group(security_group_id) + def refresh_security_group_rules(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + return self.driver.refresh_security_group_rules(security_group_id) + + @exception.wrap_exception + def refresh_security_group_members(self, context, + security_group_id, **_kwargs): + """This call passes straight through to the virtualization driver.""" + return self.driver.refresh_security_group_members(security_group_id) @exception.wrap_exception def run_instance(self, context, instance_id, **_kwargs): @@ -111,7 +176,8 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref['name'] in self.driver.list_instances(): raise exception.Error(_("Instance has already been created")) - logging.debug(_("instance %s: starting..."), instance_id) + LOG.audit(_("instance %s: starting..."), instance_id, + context=context) self.db.instance_update(context, instance_id, {'host': self.host}) @@ -149,8 +215,8 @@ class ComputeManager(manager.Manager): instance_id, {'launched_at': now}) except Exception: # pylint: disable-msg=W0702 - logging.exception(_("instance %s: Failed to spawn"), - instance_ref['name']) + LOG.exception(_("instance %s: Failed to spawn"), instance_id, + context=context) self.db.instance_set_state(context, instance_id, power_state.SHUTDOWN) @@ -158,17 +224,19 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def terminate_instance(self, context, instance_id): """Terminate an instance on this machine.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Terminating instance %s"), instance_id, context=context) if not FLAGS.stub_network: address = self.db.instance_get_floating_address(context, instance_ref['id']) if address: - logging.debug(_("Disassociating address %s") % address) + LOG.debug(_("Disassociating address %s"), address, + context=context) # NOTE(vish): Right now we don't really care if the ip is # disassociated. We may need to worry about # checking this later. @@ -180,15 +248,14 @@ class ComputeManager(manager.Manager): address = self.db.instance_get_fixed_address(context, instance_ref['id']) if address: - logging.debug(_("Deallocating address %s") % address) + LOG.debug(_("Deallocating address %s"), address, + context=context) # NOTE(vish): Currently, nothing needs to be done on the # network node until release. If this changes, # we will need to cast here.
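terminate_instance and the other mutating methods are now wrapped in the checks_instance_lock decorator defined earlier in this patch. A standalone sketch of the same gate, with stub Context and Manager classes in place of Nova's:

    import functools

    def checks_instance_lock(fn):
        """Skip the action when the instance is locked and the caller
        is not an admin -- the same gate the compute manager applies."""
        @functools.wraps(fn)
        def wrapper(self, context, instance_id, *args, **kwargs):
            if context.is_admin or not self.locks.get(instance_id):
                return fn(self, context, instance_id, *args, **kwargs)
            return False  # locked and not admin: do nothing
        return wrapper

    class Context(object):
        def __init__(self, is_admin):
            self.is_admin = is_admin

    class Manager(object):
        def __init__(self):
            self.locks = {42: True}  # instance 42 is locked

        @checks_instance_lock
        def reboot_instance(self, context, instance_id):
            return 'rebooted %s' % instance_id

    m = Manager()
    assert m.reboot_instance(Context(False), 42) is False
    assert m.reboot_instance(Context(True), 42) == 'rebooted 42'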
self.network_manager.deallocate_fixed_ip(context.elevated(), address) - logging.debug(_("instance %s: terminating"), instance_id) - volumes = instance_ref.get('volumes', []) or [] for volume in volumes: self.detach_volume(context, instance_id, volume['id']) @@ -202,20 +269,22 @@ class ComputeManager(manager.Manager): self.db.instance_destroy(context, instance_id) @exception.wrap_exception + @checks_instance_lock def reboot_instance(self, context, instance_id): """Reboot an instance on this server.""" context = context.elevated() self._update_state(context, instance_id) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("Rebooting instance %s"), instance_id, context=context) if instance_ref['state'] != power_state.RUNNING: - logging.warn(_('trying to reboot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_id, - instance_ref['state'], - power_state.RUNNING) + LOG.warn(_('trying to reboot a non-running ' + 'instance: %s (state: %s expected: %s)'), + instance_id, + instance_ref['state'], + power_state.RUNNING, + context=context) - logging.debug(_('instance %s: rebooting'), instance_ref['name']) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -225,7 +294,7 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception - def snapshot_instance(self, context, instance_id, name): + def snapshot_instance(self, context, instance_id, image_id): """Snapshot an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) @@ -235,23 +304,51 @@ class ComputeManager(manager.Manager): # potentially? self._update_state(context, instance_id) - logging.debug(_('instance %s: snapshotting'), instance_ref['name']) + LOG.audit(_('instance %s: snapshotting'), instance_id, + context=context) if instance_ref['state'] != power_state.RUNNING: - logging.warn(_('trying to snapshot a non-running ' - 'instance: %s (state: %s excepted: %s)'), - instance_id, - instance_ref['state'], - power_state.RUNNING) + LOG.warn(_('trying to snapshot a non-running ' + 'instance: %s (state: %s expected: %s)'), + instance_id, instance_ref['state'], power_state.RUNNING) - self.driver.snapshot(instance_ref, name) + self.driver.snapshot(instance_ref, image_id) @exception.wrap_exception + @checks_instance_lock + def set_admin_password(self, context, instance_id, new_pass=None): + """Set the root/admin password for an instance on this server.""" + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + if instance_ref['state'] != power_state.RUNNING: + logging.warn('trying to reset the password on a non-running ' + 'instance: %s (state: %s expected: %s)', + instance_ref['id'], + instance_ref['state'], + power_state.RUNNING) + + logging.debug('instance %s: setting admin password', + instance_ref['name']) + if new_pass is None: + # Generate a random password + new_pass = self._generate_password(FLAGS.password_length) + + self.driver.set_admin_password(instance_ref, new_pass) + self._update_state(context, instance_id) + + def _generate_password(self, length=20): + """Generate a random sequence of letters and digits + to be used as a password.
+ """ + chrs = string.letters + string.digits + return "".join([random.choice(chrs) for i in xrange(length)]) + + @exception.wrap_exception + @checks_instance_lock def rescue_instance(self, context, instance_id): """Rescue an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: rescuing'), instance_id) + LOG.audit(_('instance %s: rescuing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -261,12 +358,12 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def unrescue_instance(self, context, instance_id): """Rescue an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: unrescuing'), instance_id) + LOG.audit(_('instance %s: unrescuing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -280,12 +377,12 @@ class ComputeManager(manager.Manager): self._update_state(context, instance_id) @exception.wrap_exception + @checks_instance_lock def pause_instance(self, context, instance_id): """Pause an instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug('instance %s: pausing', instance_id) + LOG.audit(_('instance %s: pausing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -297,12 +394,12 @@ class ComputeManager(manager.Manager): result)) @exception.wrap_exception + @checks_instance_lock def unpause_instance(self, context, instance_id): """Unpause a paused instance on this server.""" context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug('instance %s: unpausing', instance_id) + LOG.audit(_('instance %s: unpausing'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, @@ -319,17 +416,20 @@ class ComputeManager(manager.Manager): instance_ref = self.db.instance_get(context, instance_id) if instance_ref["state"] == power_state.RUNNING: - logging.debug(_("instance %s: retrieving diagnostics"), - instance_id) + LOG.audit(_("instance %s: retrieving diagnostics"), instance_id, + context=context) return self.driver.get_diagnostics(instance_ref) @exception.wrap_exception + @checks_instance_lock def suspend_instance(self, context, instance_id): - """suspend the instance with instance_id""" + """ + suspend the instance with instance_id + + """ context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: suspending'), instance_id) + LOG.audit(_('instance %s: suspending'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 'suspending') @@ -340,12 +440,15 @@ class ComputeManager(manager.Manager): result)) @exception.wrap_exception + @checks_instance_lock def resume_instance(self, context, instance_id): - """resume the suspended instance with instance_id""" + """ + resume the suspended instance with instance_id + + """ context = context.elevated() instance_ref = self.db.instance_get(context, instance_id) - - logging.debug(_('instance %s: resuming'), instance_id) + LOG.audit(_('instance %s: resuming'), instance_id, context=context) self.db.instance_set_state(context, instance_id, power_state.NOSTATE, 
'resuming') @@ -355,22 +458,67 @@ class ComputeManager(manager.Manager): instance_id, result)) + @exception.wrap_exception + def lock_instance(self, context, instance_id): + """ + lock the instance with instance_id + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + LOG.debug(_('instance %s: locking'), instance_id, context=context) + self.db.instance_update(context, instance_id, {'locked': True}) + + @exception.wrap_exception + def unlock_instance(self, context, instance_id): + """ + unlock the instance with instance_id + + """ + context = context.elevated() + instance_ref = self.db.instance_get(context, instance_id) + + LOG.debug(_('instance %s: unlocking'), instance_id, context=context) + self.db.instance_update(context, instance_id, {'locked': False}) + + @exception.wrap_exception + def get_lock(self, context, instance_id): + """ + return the boolean state of (instance with instance_id)'s lock + + """ + context = context.elevated() + LOG.debug(_('instance %s: getting locked state'), instance_id, + context=context) + instance_ref = self.db.instance_get(context, instance_id) + return instance_ref['locked'] + @exception.wrap_exception def get_console_output(self, context, instance_id): """Send the console output for an instance.""" context = context.elevated() - logging.debug(_("instance %s: getting console output"), instance_id) instance_ref = self.db.instance_get(context, instance_id) - + LOG.audit(_("Get console output for instance %s"), instance_id, + context=context) return self.driver.get_console_output(instance_ref) @exception.wrap_exception + def get_ajax_console(self, context, instance_id): + """Return connection information for an ajax console""" + context = context.elevated() + logging.debug(_("instance %s: getting ajax console"), instance_id) + instance_ref = self.db.instance_get(context, instance_id) + + return self.driver.get_ajax_console(instance_ref) + + @checks_instance_lock def attach_volume(self, context, instance_id, volume_id, mountpoint): """Attach a volume to an instance.""" context = context.elevated() - logging.debug(_("instance %s: attaching volume %s to %s"), instance_id, - volume_id, mountpoint) instance_ref = self.db.instance_get(context, instance_id) + LOG.audit(_("instance %s: attaching volume %s to %s"), instance_id, + volume_id, mountpoint, context=context) dev_path = self.volume_manager.setup_compute_volume(context, volume_id) try: @@ -385,8 +533,8 @@ class ComputeManager(manager.Manager): # NOTE(vish): The inline callback eats the exception info so we # log the traceback here and reraise the same # ecxception below. 
- logging.exception(_("instance %s: attach failed %s, removing"), - instance_id, mountpoint) + LOG.exception(_("instance %s: attach failed %s, removing"), + instance_id, mountpoint, context=context) self.volume_manager.remove_compute_volume(context, volume_id) raise exc @@ -394,17 +542,18 @@ class ComputeManager(manager.Manager): return True @exception.wrap_exception + @checks_instance_lock def detach_volume(self, context, instance_id, volume_id): """Detach a volume from an instance.""" context = context.elevated() - logging.debug(_("instance %s: detaching volume %s"), - instance_id, - volume_id) instance_ref = self.db.instance_get(context, instance_id) volume_ref = self.db.volume_get(context, volume_id) + LOG.audit(_("Detach volume %s from mountpoint %s on instance %s"), + volume_id, volume_ref['mountpoint'], instance_id, + context=context) if instance_ref['name'] not in self.driver.list_instances(): - logging.warn(_("Detaching volume from unknown instance %s"), - instance_ref['name']) + LOG.warn(_("Detaching volume from unknown instance %s"), + instance_id, context=context) else: self.driver.detach_volume(instance_ref['name'], volume_ref['mountpoint']) diff --git a/nova/compute/monitor.py b/nova/compute/monitor.py index 60c347a5ea6e..14d0e8ca1714 100644 --- a/nova/compute/monitor.py +++ b/nova/compute/monitor.py @@ -25,19 +25,17 @@ Instance Monitoring: """ import datetime -import logging import os -import sys import time import boto import boto.s3 import rrdtool -from twisted.internet import defer from twisted.internet import task from twisted.application import service from nova import flags +from nova import log as logging from nova.virt import connection as virt_connection @@ -91,6 +89,9 @@ RRD_VALUES = { utcnow = datetime.datetime.utcnow +LOG = logging.getLogger('nova.compute.monitor') + + def update_rrd(instance, name, data): """ Updates the specified RRD file. @@ -255,20 +256,20 @@ class Instance(object): Updates the instances statistics and stores the resulting graphs in the internal object store on the cloud controller. """ - logging.debug(_('updating %s...'), self.instance_id) + LOG.debug(_('updating %s...'), self.instance_id) try: data = self.fetch_cpu_stats() if data != None: - logging.debug('CPU: %s', data) + LOG.debug('CPU: %s', data) update_rrd(self, 'cpu', data) data = self.fetch_net_stats() - logging.debug('NET: %s', data) + LOG.debug('NET: %s', data) update_rrd(self, 'net', data) data = self.fetch_disk_stats() - logging.debug('DISK: %s', data) + LOG.debug('DISK: %s', data) update_rrd(self, 'disk', data) # TODO(devcamcar): Turn these into pool.ProcessPool.execute() calls @@ -285,7 +286,7 @@ class Instance(object): graph_disk(self, '1w') graph_disk(self, '1m') except Exception: - logging.exception(_('unexpected error during update')) + LOG.exception(_('unexpected error during update')) self.last_updated = utcnow() @@ -309,7 +310,7 @@ class Instance(object): self.cputime = float(info['cpu_time']) self.cputime_last_updated = utcnow() - logging.debug('CPU: %d', self.cputime) + LOG.debug('CPU: %d', self.cputime) # Skip calculation on first pass. Need delta to get a meaningful value. if cputime_last_updated == None: @@ -319,17 +320,17 @@ class Instance(object): d = self.cputime_last_updated - cputime_last_updated t = d.days * 86400 + d.seconds - logging.debug('t = %d', t) + LOG.debug('t = %d', t) # Calculate change over time in number of nanoseconds of CPU time used. 
cputime_delta = self.cputime - cputime_last - logging.debug('cputime_delta = %s', cputime_delta) + LOG.debug('cputime_delta = %s', cputime_delta) # Get the number of virtual cpus in this domain. vcpus = int(info['num_cpu']) - logging.debug('vcpus = %d', vcpus) + LOG.debug('vcpus = %d', vcpus) # Calculate CPU % used and cap at 100. return min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100) @@ -351,8 +352,8 @@ class Instance(object): rd += rd_bytes wr += wr_bytes except TypeError: - logging.error(_('Cannot get blockstats for "%s" on "%s"'), - disk, self.instance_id) + LOG.error(_('Cannot get blockstats for "%s" on "%s"'), + disk, self.instance_id) raise return '%d:%d' % (rd, wr) @@ -373,8 +374,8 @@ class Instance(object): rx += stats[0] tx += stats[4] except TypeError: - logging.error(_('Cannot get ifstats for "%s" on "%s"'), - interface, self.instance_id) + LOG.error(_('Cannot get ifstats for "%s" on "%s"'), + interface, self.instance_id) raise return '%d:%d' % (rx, tx) @@ -408,7 +409,7 @@ class InstanceMonitor(object, service.Service): try: conn = virt_connection.get_connection(read_only=True) except Exception, exn: - logging.exception(_('unexpected exception getting connection')) + LOG.exception(_('unexpected exception getting connection')) time.sleep(FLAGS.monitoring_instances_delay) return @@ -416,14 +417,14 @@ class InstanceMonitor(object, service.Service): try: self.updateInstances_(conn, domain_ids) except Exception, exn: - logging.exception('updateInstances_') + LOG.exception('updateInstances_') def updateInstances_(self, conn, domain_ids): for domain_id in domain_ids: if not domain_id in self._instances: instance = Instance(conn, domain_id) self._instances[domain_id] = instance - logging.debug(_('Found instance: %s'), domain_id) + LOG.debug(_('Found instance: %s'), domain_id) for key in self._instances.keys(): instance = self._instances[key] diff --git a/nova/console/__init__.py b/nova/console/__init__.py new file mode 100644 index 000000000000..dfc72cd61565 --- /dev/null +++ b/nova/console/__init__.py @@ -0,0 +1,13 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +""" +:mod:`nova.console` -- Console Proxy to set up VM console access (i.e. with xvp) +================================================================================= + +.. automodule:: nova.console + :platform: Unix + :synopsis: Wrapper around console proxies such as xvp to set up + multitenant VM console access +.. moduleauthor:: Monsyne Dragon +""" +from nova.console.api import API diff --git a/nova/console/api.py b/nova/console/api.py new file mode 100644 index 000000000000..3850d2c44e53 --- /dev/null +++ b/nova/console/api.py @@ -0,0 +1,75 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
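fetch_cpu_stats() in nova/compute/monitor.py above turns two cputime samples into a utilization percentage with min(cputime_delta / (t * vcpus * 1.0e9) * 100, 100). A quick numeric check of that formula:

    def cpu_percent(cputime_delta_ns, elapsed_s, vcpus):
        """CPU percent used over the sample window, capped at 100."""
        return min(cputime_delta_ns / (elapsed_s * vcpus * 1.0e9) * 100, 100)

    # 30s of CPU time burned over a 60s window on 2 vCPUs -> 25%
    assert cpu_percent(30 * 1.0e9, 60, 2) == 25.0
    # the cap keeps noisy samples from reporting more than 100%
    assert cpu_percent(500 * 1.0e9, 60, 2) == 100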
+ +""" +Handles ConsoleProxy API requests +""" + +from nova import exception +from nova.db import base + + +from nova import flags +from nova import rpc + + +FLAGS = flags.FLAGS + + +class API(base.Base): + """API for spining up or down console proxy connections""" + + def __init__(self, **kwargs): + super(API, self).__init__(**kwargs) + + def get_consoles(self, context, instance_id): + return self.db.console_get_all_by_instance(context, instance_id) + + def get_console(self, context, instance_id, console_id): + return self.db.console_get(context, console_id, instance_id) + + def delete_console(self, context, instance_id, console_id): + console = self.db.console_get(context, + console_id, + instance_id) + pool = console['pool'] + rpc.cast(context, + self.db.queue_get_for(context, + FLAGS.console_topic, + pool['host']), + {"method": "remove_console", + "args": {"console_id": console['id']}}) + + def create_console(self, context, instance_id): + instance = self.db.instance_get(context, instance_id) + #NOTE(mdragon): If we wanted to return this the console info + # here, as we would need to do a call. + # They can just do an index later to fetch + # console info. I am not sure which is better + # here. + rpc.cast(context, + self._get_console_topic(context, instance['host']), + {"method": "add_console", + "args": {"instance_id": instance_id}}) + + def _get_console_topic(self, context, instance_host): + topic = self.db.queue_get_for(context, + FLAGS.compute_topic, + instance_host) + return rpc.call(context, + topic, + {"method": "get_console_topic", "args": {'fake': 1}}) diff --git a/nova/console/fake.py b/nova/console/fake.py new file mode 100644 index 000000000000..7a90d5221001 --- /dev/null +++ b/nova/console/fake.py @@ -0,0 +1,58 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Fake ConsoleProxy driver for tests. 
+""" + +from nova import exception + + +class FakeConsoleProxy(object): + """Fake ConsoleProxy driver.""" + + @property + def console_type(self): + return "fake" + + def setup_console(self, context, console): + """Sets up actual proxies""" + pass + + def teardown_console(self, context, console): + """Tears down actual proxies""" + pass + + def init_host(self): + """Start up any config'ed consoles on start""" + pass + + def generate_password(self, length=8): + """Returns random console password""" + return "fakepass" + + def get_port(self, context): + """get available port for consoles that need one""" + return 5999 + + def fix_pool_password(self, password): + """Trim password to length, and any other massaging""" + return password + + def fix_console_password(self, password): + """Trim password to length, and any other massaging""" + return password diff --git a/nova/console/manager.py b/nova/console/manager.py new file mode 100644 index 000000000000..c55ca8e8f52c --- /dev/null +++ b/nova/console/manager.py @@ -0,0 +1,127 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Console Proxy Service +""" + +import functools +import logging +import socket + +from nova import exception +from nova import flags +from nova import manager +from nova import rpc +from nova import utils + +FLAGS = flags.FLAGS +flags.DEFINE_string('console_driver', + 'nova.console.xvp.XVPConsoleProxy', + 'Driver to use for the console proxy') +flags.DEFINE_boolean('stub_compute', False, + 'Stub calls to compute worker for tests') +flags.DEFINE_string('console_public_hostname', + socket.gethostname(), + 'Publicly visable name for this console host') + + +class ConsoleProxyManager(manager.Manager): + + """ Sets up and tears down any proxy connections needed for accessing + instance consoles securely""" + + def __init__(self, console_driver=None, *args, **kwargs): + if not console_driver: + console_driver = FLAGS.console_driver + self.driver = utils.import_object(console_driver) + super(ConsoleProxyManager, self).__init__(*args, **kwargs) + self.driver.host = self.host + + def init_host(self): + self.driver.init_host() + + @exception.wrap_exception + def add_console(self, context, instance_id, password=None, + port=None, **kwargs): + instance = self.db.instance_get(context, instance_id) + host = instance['host'] + name = instance['name'] + pool = self.get_pool_for_instance_host(context, host) + try: + console = self.db.console_get_by_pool_instance(context, + pool['id'], + instance_id) + except exception.NotFound: + logging.debug("Adding console") + if not password: + password = self.driver.generate_password() + if not port: + port = self.driver.get_port(context) + console_data = {'instance_name': name, + 'instance_id': instance_id, + 'password': password, + 'pool_id': pool['id']} + if port: + console_data['port'] = port + console = self.db.console_create(context, console_data) + self.driver.setup_console(context, 
console) + return console['id'] + + @exception.wrap_exception + def remove_console(self, context, console_id, **_kwargs): + try: + console = self.db.console_get(context, console_id) + except exception.NotFound: + logging.debug(_('Tried to remove non-existent console ' + '%(console_id)s.') % + {'console_id': console_id}) + return + self.db.console_delete(context, console_id) + self.driver.teardown_console(context, console) + + def get_pool_for_instance_host(self, context, instance_host): + context = context.elevated() + console_type = self.driver.console_type + try: + pool = self.db.console_pool_get_by_host_type(context, + instance_host, + self.host, + console_type) + except exception.NotFound: + #NOTE(mdragon): Right now, the only place this info exists is the + # compute worker's flagfile, at least for + # xenserver. Thus we need to ask. + if FLAGS.stub_compute: + pool_info = {'address': '127.0.0.1', + 'username': 'test', + 'password': '1234pass'} + else: + pool_info = rpc.call(context, + self.db.queue_get_for(context, + FLAGS.compute_topic, + instance_host), + {"method": "get_console_pool_info", + "args": {"console_type": console_type}}) + pool_info['password'] = self.driver.fix_pool_password( + pool_info['password']) + pool_info['host'] = self.host + pool_info['public_hostname'] = FLAGS.console_public_hostname + pool_info['console_type'] = self.driver.console_type + pool_info['compute_host'] = instance_host + pool = self.db.console_pool_create(context, pool_info) + return pool diff --git a/nova/console/xvp.conf.template b/nova/console/xvp.conf.template new file mode 100644 index 000000000000..695ddbe964df --- /dev/null +++ b/nova/console/xvp.conf.template @@ -0,0 +1,16 @@ +# One time password use with time window +OTP ALLOW IPCHECK HTTP 60 +#if $multiplex_port +MULTIPLEX $multiplex_port +#end if + +#for $pool in $pools +POOL $pool.address + DOMAIN $pool.address + MANAGER root $pool.password + HOST $pool.address + VM - dummy 0123456789ABCDEF + #for $console in $pool.consoles + VM #if $multiplex_port then '-' else $console.port # $console.instance_name $pass_encode($console.password) + #end for +#end for diff --git a/nova/console/xvp.py b/nova/console/xvp.py new file mode 100644 index 000000000000..2a76223dacc7 --- /dev/null +++ b/nova/console/xvp.py @@ -0,0 +1,194 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +XVP (Xenserver VNC Proxy) driver.
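The xvp driver renders the Cheetah template above with pool and console data to produce /etc/xvp.conf. A reduced rendering example, assuming the Cheetah package is installed; the pass_encode lambda is a stand-in for xvp's real password obfuscation, and the pool data is invented:

    from Cheetah.Template import Template

    template = """#for $pool in $pools
    POOL $pool.address
      MANAGER root $pass_encode($pool.password)
    #end for"""

    conf_data = {'pools': [{'address': '192.168.0.10',
                            'password': 'secret'}],
                 # stand-in for the driver's fix_pool_password hook
                 'pass_encode': lambda p: p[::-1]}
    print(str(Template(template, searchList=[conf_data])))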
+""" + +import fcntl +import logging +import os +import signal +import subprocess + +from Cheetah.Template import Template + +from nova import context +from nova import db +from nova import exception +from nova import flags +from nova import utils + +flags.DEFINE_string('console_xvp_conf_template', + utils.abspath('console/xvp.conf.template'), + 'XVP conf template') +flags.DEFINE_string('console_xvp_conf', + '/etc/xvp.conf', + 'generated XVP conf file') +flags.DEFINE_string('console_xvp_pid', + '/var/run/xvp.pid', + 'XVP master process pid file') +flags.DEFINE_string('console_xvp_log', + '/var/log/xvp.log', + 'XVP log file') +flags.DEFINE_integer('console_xvp_multiplex_port', + 5900, + "port for XVP to multiplex VNC connections on") +FLAGS = flags.FLAGS + + +class XVPConsoleProxy(object): + """Sets up XVP config, and manages xvp daemon""" + + def __init__(self): + self.xvpconf_template = open(FLAGS.console_xvp_conf_template).read() + self.host = FLAGS.host # default, set by manager. + super(XVPConsoleProxy, self).__init__() + + @property + def console_type(self): + return "vnc+xvp" + + def get_port(self, context): + """get available port for consoles that need one""" + #TODO(mdragon): implement port selection for non multiplex ports, + # we are not using that, but someone else may want + # it. + return FLAGS.console_xvp_multiplex_port + + def setup_console(self, context, console): + """Sets up actual proxies""" + self._rebuild_xvp_conf(context.elevated()) + + def teardown_console(self, context, console): + """Tears down actual proxies""" + self._rebuild_xvp_conf(context.elevated()) + + def init_host(self): + """Start up any config'ed consoles on start""" + ctxt = context.get_admin_context() + self._rebuild_xvp_conf(ctxt) + + def fix_pool_password(self, password): + """Trim password to length, and encode""" + return self._xvp_encrypt(password, is_pool_password=True) + + def fix_console_password(self, password): + """Trim password to length, and encode""" + return self._xvp_encrypt(password) + + def generate_password(self, length=8): + """Returns random console password""" + return os.urandom(length * 2).encode('base64')[:length] + + def _rebuild_xvp_conf(self, context): + logging.debug("Rebuilding xvp conf") + pools = [pool for pool in + db.console_pool_get_all_by_host_type(context, self.host, + self.console_type) + if pool['consoles']] + if not pools: + logging.debug("No console pools!") + self._xvp_stop() + return + conf_data = {'multiplex_port': FLAGS.console_xvp_multiplex_port, + 'pools': pools, + 'pass_encode': self.fix_console_password} + config = str(Template(self.xvpconf_template, searchList=[conf_data])) + self._write_conf(config) + self._xvp_restart() + + def _write_conf(self, config): + logging.debug('Re-wrote %s' % FLAGS.console_xvp_conf) + with open(FLAGS.console_xvp_conf, 'w') as cfile: + cfile.write(config) + + def _xvp_stop(self): + logging.debug("Stopping xvp") + pid = self._xvp_pid() + if not pid: + return + try: + os.kill(pid, signal.SIGTERM) + except OSError: + #if it's already not running, no problem. 
+ pass + + def _xvp_start(self): + if self._xvp_check_running(): + return + logging.debug("Starting xvp") + try: + utils.execute('xvp -p %s -c %s -l %s' % + (FLAGS.console_xvp_pid, + FLAGS.console_xvp_conf, + FLAGS.console_xvp_log)) + except exception.ProcessExecutionError, err: + logging.error("Error starting xvp: %s" % err) + + def _xvp_restart(self): + logging.debug("Restarting xvp") + if not self._xvp_check_running(): + logging.debug("xvp not running...") + self._xvp_start() + else: + pid = self._xvp_pid() + os.kill(pid, signal.SIGUSR1) + + def _xvp_pid(self): + try: + with open(FLAGS.console_xvp_pid, 'r') as pidfile: + pid = int(pidfile.read()) + except IOError: + return None + except ValueError: + return None + return pid + + def _xvp_check_running(self): + pid = self._xvp_pid() + if not pid: + return False + try: + os.kill(pid, 0) + except OSError: + return False + return True + + def _xvp_encrypt(self, password, is_pool_password=False): + """Call xvp to obfuscate passwords for config file. + + Args: + - password: the password to encode, max 8 char for vm passwords, + and 16 chars for pool passwords. passwords will + be trimmed to max len before encoding. + - is_pool_password: True if this is the XenServer api password + False if it's a VM console password + (xvp uses different keys and max lengths for pool passwords) + + Note that xvp's obfuscation should not be considered 'real' encryption. + It simply DES encrypts the passwords with static keys plainly viewable + in the xvp source code.""" + maxlen = 8 + flag = '-e' + if is_pool_password: + maxlen = 16 + flag = '-x' + #xvp will blow up on passwords that are too long (mdragon) + password = password[:maxlen] + out, err = utils.execute('xvp %s' % flag, process_input=password) + return out.strip() diff --git a/nova/crypto.py b/nova/crypto.py index b8405552d12b..a34b940f50d5 100644 --- a/nova/crypto.py +++ b/nova/crypto.py @@ -24,7 +24,6 @@ Includes root and intermediate CAs, SSH key_pairs and x509 certificates.
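_xvp_pid and _xvp_check_running above manage the daemon purely through a pid file and signals; kill with signal 0 is the standard liveness probe. The same two helpers in self-contained form:

    import os

    def pid_from_file(path):
        """Return the pid stored in path, or None if unreadable/garbage."""
        try:
            with open(path) as f:
                return int(f.read())
        except (IOError, ValueError):
            return None

    def is_running(pid):
        """Signal 0 probes a process without disturbing it."""
        if not pid:
            return False
        try:
            os.kill(pid, 0)
        except OSError:
            return False
        return True

    assert is_running(os.getpid())
    assert not is_running(None)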
import base64 import gettext import hashlib -import logging import os import shutil import struct @@ -39,8 +38,10 @@ gettext.install('nova', unicode=1) from nova import context from nova import db from nova import flags +from nova import log as logging +LOG = logging.getLogger("nova.crypto") FLAGS = flags.FLAGS flags.DEFINE_string('ca_file', 'cacert.pem', _('Filename of root CA')) flags.DEFINE_string('key_file', @@ -254,7 +255,7 @@ def _sign_csr(csr_text, ca_folder): csrfile = open(inbound, "w") csrfile.write(csr_text) csrfile.close() - logging.debug(_("Flags path: %s") % ca_folder) + LOG.debug(_("Flags path: %s"), ca_folder) start = os.getcwd() # Change working dir to CA os.chdir(ca_folder) diff --git a/nova/db/api.py b/nova/db/api.py index 0fa5eb1e8e42..f9d561587b19 100644 --- a/nova/db/api.py +++ b/nova/db/api.py @@ -42,6 +42,10 @@ flags.DEFINE_string('db_backend', 'sqlalchemy', 'The backend to use for db') flags.DEFINE_boolean('enable_new_services', True, 'Services to be added to the available pool on create') +flags.DEFINE_string('instance_name_template', 'instance-%08x', + 'Template string to be used to generate instance names') +flags.DEFINE_string('volume_name_template', 'volume-%08x', + 'Template string to be used to generate volume names') IMPL = utils.LazyPluggable(FLAGS['db_backend'], @@ -81,11 +85,21 @@ def service_get(context, service_id): return IMPL.service_get(context, service_id) +def service_get_all(context, disabled=False): + """Get all services.""" + return IMPL.service_get_all(context, None, disabled) + + def service_get_all_by_topic(context, topic): - """Get all compute services for a given topic.""" + """Get all services for a given topic.""" return IMPL.service_get_all_by_topic(context, topic) +def service_get_all_by_host(context, host): + """Get all services for a given host.""" + return IMPL.service_get_all_by_host(context, host) + + def service_get_all_compute_sorted(context): """Get all compute services sorted by instance count.
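nova.db.api routes every call through IMPL, a lazily imported backend chosen by the db_backend flag. A simplified sketch of that indirection (not Nova's actual utils.LazyPluggable; the stdlib json module stands in for a real backend just to show the lazy import):

    class LazyPluggable(object):
        """Import one of several named backends on first attribute use."""

        def __init__(self, choice, **backends):
            self.__choice = choice
            self.__backends = backends
            self.__backend = None

        def __getattr__(self, key):
            # only reached for attributes not set in __init__
            if self.__backend is None:
                name = self.__backends[self.__choice]
                self.__backend = __import__(name, None, None, [''])
            return getattr(self.__backend, key)

    IMPL = LazyPluggable('sqlalchemy', sqlalchemy='json')
    # nothing is imported until the first attribute access:
    assert IMPL.dumps([1]) == '[1]'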
@@ -285,6 +299,10 @@ def fixed_ip_get_instance(context, address): return IMPL.fixed_ip_get_instance(context, address) +def fixed_ip_get_instance_v6(context, address): + return IMPL.fixed_ip_get_instance_v6(context, address) + + def fixed_ip_get_network(context, address): """Get a network for a fixed ip by address.""" return IMPL.fixed_ip_get_network(context, address) @@ -343,6 +361,10 @@ def instance_get_fixed_address(context, instance_id): return IMPL.instance_get_fixed_address(context, instance_id) +def instance_get_fixed_address_v6(context, instance_id): + return IMPL.instance_get_fixed_address_v6(context, instance_id) + + def instance_get_floating_address(context, instance_id): """Get the first floating ip address of an instance.""" return IMPL.instance_get_floating_address(context, instance_id) @@ -538,6 +560,10 @@ def project_get_network(context, project_id, associate=True): return IMPL.project_get_network(context, project_id) +def project_get_network_v6(context, project_id): + return IMPL.project_get_network_v6(context, project_id) + + ################### @@ -772,6 +798,13 @@ def security_group_rule_get_by_security_group(context, security_group_id): security_group_id) +def security_group_rule_get_by_security_group_grantee(context, + security_group_id): + """Get all rules that grant access to the given security group.""" + return IMPL.security_group_rule_get_by_security_group_grantee(context, + security_group_id) + + def security_group_rule_destroy(context, security_group_rule_id): """Deletes a security group rule.""" return IMPL.security_group_rule_destroy(context, security_group_rule_id) @@ -894,3 +927,57 @@ def host_get_networks(context, host): """ return IMPL.host_get_networks(context, host) + + +################## + + +def console_pool_create(context, values): + """Create console pool.""" + return IMPL.console_pool_create(context, values) + + +def console_pool_get(context, pool_id): + """Get a console pool.""" + return IMPL.console_pool_get(context, pool_id) + + +def console_pool_get_by_host_type(context, compute_host, proxy_host, + console_type): + """Fetch a console pool for a given proxy host, compute host, and type.""" + return IMPL.console_pool_get_by_host_type(context, + compute_host, + proxy_host, + console_type) + + +def console_pool_get_all_by_host_type(context, host, console_type): + """Fetch all pools for given proxy host and type.""" + return IMPL.console_pool_get_all_by_host_type(context, + host, + console_type) + + +def console_create(context, values): + """Create a console.""" + return IMPL.console_create(context, values) + + +def console_delete(context, console_id): + """Delete a console.""" + return IMPL.console_delete(context, console_id) + + +def console_get_by_pool_instance(context, pool_id, instance_id): + """Get console entry for a given instance and pool.""" + return IMPL.console_get_by_pool_instance(context, pool_id, instance_id) + + +def console_get_all_by_instance(context, instance_id): + """Get consoles for a given instance.""" + return IMPL.console_get_all_by_instance(context, instance_id) + + +def console_get(context, console_id, instance_id=None): + """Get a specific console (possibly on a given instance).""" + return IMPL.console_get(context, console_id, instance_id) diff --git a/tools/iscsidev.sh b/nova/db/migration.py old mode 100755 new mode 100644 similarity index 52% rename from tools/iscsidev.sh rename to nova/db/migration.py index 6f5b572df89c..e54b90cd8f17 --- a/tools/iscsidev.sh +++ b/nova/db/migration.py @@ -1,4 +1,4 @@ -#!/bin/sh +# vim: 
tabstop=4 shiftwidth=4 softtabstop=4 # Copyright 2010 United States Government as represented by the # Administrator of the National Aeronautics and Space Administration. @@ -15,27 +15,24 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. +"""Database setup and migration commands.""" -# NOTE(vish): This script helps udev create common names for discovered iscsi -# volumes under /dev/iscsi. To use it, create /dev/iscsi and add -# a file to /etc/udev/rules.d like so: -# mkdir /dev/iscsi -# echo 'KERNEL=="sd*", BUS=="scsi", PROGRAM="/path/to/iscsidev.sh -# %b",SYMLINK+="iscsi/%c%n"' > /etc/udev/rules.d/55-openiscsi.rules +from nova import flags +from nova import utils -BUS=${1} -HOST=${BUS%%:*} +FLAGS = flags.FLAGS +flags.DECLARE('db_backend', 'nova.db.api') -if [ ! -e /sys/class/iscsi_host ]; then - exit 1 -fi -file="/sys/class/iscsi_host/host${HOST}/device/session*/iscsi_session*/session*/targetname" +IMPL = utils.LazyPluggable(FLAGS['db_backend'], + sqlalchemy='nova.db.sqlalchemy.migration') -target_name=$(cat ${file}) -if [ -z "${target_name}" ]; then - exit 1 -fi +def db_sync(version=None): + """Migrate the database to `version` or the most recent version.""" + return IMPL.db_sync(version=version) -echo "${target_name##*:}" + +def db_version(): + """Display the current database version.""" + return IMPL.db_version() diff --git a/nova/db/sqlalchemy/__init__.py b/nova/db/sqlalchemy/__init__.py index 22aa1cfe6c8b..747015af53eb 100644 --- a/nova/db/sqlalchemy/__init__.py +++ b/nova/db/sqlalchemy/__init__.py @@ -15,29 +15,3 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - -""" -SQLAlchemy database backend -""" -import logging -import time - -from sqlalchemy.exc import OperationalError - -from nova import flags -from nova.db.sqlalchemy import models - - -FLAGS = flags.FLAGS - - -for i in xrange(FLAGS.sql_max_retries): - if i > 0: - time.sleep(FLAGS.sql_retry_interval) - - try: - models.register_models() - break - except OperationalError: - logging.exception(_("Data store is unreachable." 
- " Trying again in %d seconds.") % FLAGS.sql_retry_interval) diff --git a/nova/db/sqlalchemy/api.py b/nova/db/sqlalchemy/api.py index aaa07e3c9291..b63b84bed595 100644 --- a/nova/db/sqlalchemy/api.py +++ b/nova/db/sqlalchemy/api.py @@ -134,6 +134,18 @@ def service_get(context, service_id, session=None): return result +@require_admin_context +def service_get_all(context, session=None, disabled=False): + if not session: + session = get_session() + + result = session.query(models.Service).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(disabled=disabled).\ + all() + return result + + @require_admin_context def service_get_all_by_topic(context, topic): session = get_session() @@ -144,6 +156,15 @@ def service_get_all_by_topic(context, topic): all() +@require_admin_context +def service_get_all_by_host(context, host): + session = get_session() + return session.query(models.Service).\ + filter_by(deleted=False).\ + filter_by(host=host).\ + all() + + @require_admin_context def _service_get_all_topic_subquery(context, session, topic, subq, label): sort_value = getattr(subq.c, label) @@ -585,6 +606,17 @@ def fixed_ip_get_instance(context, address): return fixed_ip_ref.instance +@require_context +def fixed_ip_get_instance_v6(context, address): + session = get_session() + mac = utils.to_mac(address) + + result = session.query(models.Instance).\ + filter_by(mac_address=mac).\ + first() + return result + + @require_admin_context def fixed_ip_get_network(context, address): fixed_ip_ref = fixed_ip_get_by_address(context, address) @@ -650,7 +682,7 @@ def instance_get(context, instance_id, session=None): if is_admin_context(context): result = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload('security_groups')).\ + options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ @@ -658,7 +690,7 @@ def instance_get(context, instance_id, session=None): elif is_user_context(context): result = session.query(models.Instance).\ options(joinedload_all('fixed_ip.floating_ips')).\ - options(joinedload('security_groups')).\ + options(joinedload_all('security_groups.rules')).\ options(joinedload('volumes')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ @@ -743,13 +775,16 @@ def instance_get_by_id(context, instance_id): if is_admin_context(context): result = session.query(models.Instance).\ + options(joinedload_all('fixed_ip.floating_ips')).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.floating_ips')).\ filter_by(id=instance_id).\ filter_by(deleted=can_read_deleted(context)).\ first() elif is_user_context(context): result = session.query(models.Instance).\ options(joinedload('security_groups')).\ + options(joinedload_all('fixed_ip.floating_ips')).\ filter_by(project_id=context.project_id).\ filter_by(id=instance_id).\ filter_by(deleted=False).\ @@ -770,6 +805,17 @@ def instance_get_fixed_address(context, instance_id): return instance_ref.fixed_ip['address'] +@require_context +def instance_get_fixed_address_v6(context, instance_id): + session = get_session() + with session.begin(): + instance_ref = instance_get(context, instance_id, session=session) + network_ref = network_get_by_instance(context, instance_id) + prefix = network_ref.cidr_v6 + mac = instance_ref.mac_address + return utils.to_global_ipv6(prefix, mac) + + @require_context def instance_get_floating_address(context, instance_id): session 
= get_session() @@ -840,12 +886,9 @@ def instance_action_create(context, values): def instance_get_actions(context, instance_id): """Return the actions associated to the given instance id""" session = get_session() - actions = {} - for action in session.query(models.InstanceActions).\ + return session.query(models.InstanceActions).\ filter_by(instance_id=instance_id).\ - all(): - actions[action.action] = action.error - return actions + all() ################### @@ -1110,6 +1153,11 @@ def project_get_network(context, project_id, associate=True): return result +@require_context +def project_get_network_v6(context, project_id): + return project_get_network(context, project_id) + + ################### @@ -1581,6 +1629,44 @@ def security_group_rule_get(context, security_group_rule_id, session=None): return result +@require_context +def security_group_rule_get_by_security_group(context, security_group_id, + session=None): + if not session: + session = get_session() + if is_admin_context(context): + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(parent_group_id=security_group_id).\ + all() + else: + # TODO(vish): Join to group and check for project_id + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=False).\ + filter_by(parent_group_id=security_group_id).\ + all() + return result + + +@require_context +def security_group_rule_get_by_security_group_grantee(context, + security_group_id, + session=None): + if not session: + session = get_session() + if is_admin_context(context): + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=can_read_deleted(context)).\ + filter_by(group_id=security_group_id).\ + all() + else: + result = session.query(models.SecurityGroupIngressRule).\ + filter_by(deleted=False).\ + filter_by(group_id=security_group_id).\ + all() + return result + + @require_context def security_group_rule_create(context, values): security_group_rule_ref = models.SecurityGroupIngressRule() @@ -1816,3 +1902,111 @@ def host_get_networks(context, host): filter_by(deleted=False).\ filter_by(host=host).\ all() + + +################## + + +def console_pool_create(context, values): + pool = models.ConsolePool() + pool.update(values) + pool.save() + return pool + + +def console_pool_get(context, pool_id): + session = get_session() + result = session.query(models.ConsolePool).\ + filter_by(deleted=False).\ + filter_by(id=pool_id).\ + first() + if not result: + raise exception.NotFound(_("No console pool with id %(pool_id)s") % + {'pool_id': pool_id}) + + return result + + +def console_pool_get_by_host_type(context, compute_host, host, + console_type): + session = get_session() + result = session.query(models.ConsolePool).\ + filter_by(host=host).\ + filter_by(console_type=console_type).\ + filter_by(compute_host=compute_host).\ + filter_by(deleted=False).\ + options(joinedload('consoles')).\ + first() + if not result: + raise exception.NotFound(_('No console pool of type %(type)s ' + 'for compute host %(compute_host)s ' + 'on proxy host %(host)s') % + {'type': console_type, + 'compute_host': compute_host, + 'host': host}) + return result + + +def console_pool_get_all_by_host_type(context, host, console_type): + session = get_session() + return session.query(models.ConsolePool).\ + filter_by(host=host).\ + filter_by(console_type=console_type).\ + filter_by(deleted=False).\ + options(joinedload('consoles')).\ + all() + + +def console_create(context, values): + console = 
models.Console() + console.update(values) + console.save() + return console + + +def console_delete(context, console_id): + session = get_session() + with session.begin(): + # consoles are meant to be transient. (mdragon) + session.execute('delete from consoles ' + 'where id=:id', {'id': console_id}) + + +def console_get_by_pool_instance(context, pool_id, instance_id): + session = get_session() + result = session.query(models.Console).\ + filter_by(pool_id=pool_id).\ + filter_by(instance_id=instance_id).\ + options(joinedload('pool')).\ + first() + if not result: + raise exception.NotFound(_('No console for instance %(instance_id)s ' + 'in pool %(pool_id)s') % + {'instance_id': instance_id, + 'pool_id': pool_id}) + return result + + +def console_get_all_by_instance(context, instance_id): + session = get_session() + results = session.query(models.Console).\ + filter_by(instance_id=instance_id).\ + options(joinedload('pool')).\ + all() + return results + + +def console_get(context, console_id, instance_id=None): + session = get_session() + query = session.query(models.Console).\ + filter_by(id=console_id) + if instance_id: + query = query.filter_by(instance_id=instance_id) + result = query.options(joinedload('pool')).first() + if not result: + idesc = (_("on instance %s") % instance_id) if instance_id else "" + raise exception.NotFound(_("No console with id %(console_id)s" + " %(instance)s") % + {'instance': idesc, + 'console_id': console_id}) + return result diff --git a/nova/db/sqlalchemy/migrate_repo/README b/nova/db/sqlalchemy/migrate_repo/README new file mode 100644 index 000000000000..6218f8cac424 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/README @@ -0,0 +1,4 @@ +This is a database migration repository. + +More information at +http://code.google.com/p/sqlalchemy-migrate/ diff --git a/nova/db/sqlalchemy/migrate_repo/__init__.py b/nova/db/sqlalchemy/migrate_repo/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/nova/db/sqlalchemy/migrate_repo/manage.py b/nova/db/sqlalchemy/migrate_repo/manage.py new file mode 100644 index 000000000000..09e340f44f93 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/manage.py @@ -0,0 +1,4 @@ +#!/usr/bin/env python +from migrate.versioning.shell import main +if __name__ == '__main__': + main(debug='False', repository='.') diff --git a/nova/db/sqlalchemy/migrate_repo/migrate.cfg b/nova/db/sqlalchemy/migrate_repo/migrate.cfg new file mode 100644 index 000000000000..2c75fb763251 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/migrate.cfg @@ -0,0 +1,20 @@ +[db_settings] +# Used to identify which repository this database is versioned under. +# You can use the name of your project. +repository_id=nova + +# The name of the database table used to track the schema version. +# This name shouldn't already be used by your project. +# If this is changed once a database is under version control, you'll need to +# change the table name in each database too. +version_table=migrate_version + +# When committing a change script, Migrate will attempt to generate the +# sql for all supported databases; normally, if one of them fails - probably +# because you don't have that database installed - it is ignored and the +# commit continues, perhaps ending successfully. +# Databases in this list MUST compile successfully during a commit, or the +# entire commit will fail. List the databases your application will actually +# be using to ensure your updates to that database work properly. 
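+# (Nova keeps the list empty below, so committing a change script never
+# requires any particular database engine to be installed locally.)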
+# This must be a list; example: ['postgres','sqlite'] +required_dbs=[] diff --git a/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py new file mode 100644 index 000000000000..a312a71909b6 --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/001_austin.py @@ -0,0 +1,547 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +## Table code mostly autogenerated by genmodel.py +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +auth_tokens = Table('auth_tokens', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('token_hash', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('user_id', Integer()), + Column('server_management_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('storage_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('cdn_management_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +export_devices = Table('export_devices', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('shelf_id', Integer()), + Column('blade_id', Integer()), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + +fixed_ips = Table('fixed_ips', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('network_id', + Integer(), + ForeignKey('networks.id'), + nullable=True), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('allocated', Boolean(create_constraint=True, name=None)), + Column('leased', Boolean(create_constraint=True, name=None)), + Column('reserved', Boolean(create_constraint=True, name=None)), + ) + + +floating_ips = Table('floating_ips', meta, + Column('created_at', 
DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fixed_ip_id', + Integer(), + ForeignKey('fixed_ips.id'), + nullable=True), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +instances = Table('instances', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('internal_id', Integer()), + Column('admin_pass', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('image_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('kernel_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('ramdisk_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('launch_index', Integer()), + Column('key_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('key_data', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('state', Integer()), + Column('state_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('memory_mb', Integer()), + Column('vcpus', Integer()), + Column('local_gb', Integer()), + Column('hostname', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_type', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_data', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('reservation_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('mac_address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, 
convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +iscsi_targets = Table('iscsi_targets', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('target_num', Integer()), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('volume_id', + Integer(), + ForeignKey('volumes.id'), + nullable=True), + ) + + +key_pairs = Table('key_pairs', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('fingerprint', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('public_key', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +networks = Table('networks', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('injected', Boolean(create_constraint=True, name=None)), + Column('cidr', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('netmask', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('bridge', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('gateway', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('broadcast', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dns', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vlan', Integer()), + Column('vpn_public_address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('vpn_public_port', Integer()), + Column('vpn_private_address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('dhcp_start', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + 
String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +projects = Table('projects', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_manager', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id')), + ) + + +quotas = Table('quotas', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instances', Integer()), + Column('cores', Integer()), + Column('volumes', Integer()), + Column('gigabytes', Integer()), + Column('floating_ips', Integer()), + ) + + +security_groups = Table('security_groups', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +security_group_inst_assoc = Table('security_group_instance_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('security_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('instance_id', Integer(), ForeignKey('instances.id')), + ) + + +security_group_rules = Table('security_group_rules', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('parent_group_id', + Integer(), + ForeignKey('security_groups.id')), + Column('protocol', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, 
_warn_on_bytestring=False)), + Column('from_port', Integer()), + Column('to_port', Integer()), + Column('cidr', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('group_id', + Integer(), + ForeignKey('security_groups.id')), + ) + + +services = Table('services', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('binary', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('topic', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('report_count', Integer(), nullable=False), + Column('disabled', Boolean(create_constraint=True, name=None)), + ) + + +users = Table('users', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('access_key', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('secret_key', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('is_admin', Boolean(create_constraint=True, name=None)), + ) + + +user_project_association = Table('user_project_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('projects.id'), + primary_key=True, + nullable=False), + ) + + +user_project_role_association = Table('user_project_role_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('role', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + 
nullable=False), + ForeignKeyConstraint(['user_id', + 'project_id'], + ['user_project_association.user_id', + 'user_project_association.project_id']), + ) + + +user_role_association = Table('user_role_association', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + ForeignKey('users.id'), + primary_key=True, + nullable=False), + Column('role', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + ) + + +volumes = Table('volumes', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('ec2_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('size', Integer()), + Column('availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', + Integer(), + ForeignKey('instances.id'), + nullable=True), + Column('mountpoint', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_time', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('attach_status', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('scheduled_at', DateTime(timezone=False)), + Column('launched_at', DateTime(timezone=False)), + Column('terminated_at', DateTime(timezone=False)), + Column('display_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('display_description', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +def upgrade(migrate_engine): + # Upgrade operations go here. 
Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + + for table in (auth_tokens, export_devices, fixed_ips, floating_ips, + instances, iscsi_targets, key_pairs, networks, + projects, quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, + user_project_association, user_project_role_association, + user_role_association, volumes): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + +def downgrade(migrate_engine): + # Operations to reverse the above upgrade go here. + meta.bind = migrate_engine + for table in (auth_tokens, export_devices, fixed_ips, floating_ips, + instances, iscsi_targets, key_pairs, networks, + projects, quotas, security_groups, security_group_inst_assoc, + security_group_rules, services, users, + user_project_association, user_project_role_association, + user_role_association, volumes): + table.drop() diff --git a/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py new file mode 100644 index 000000000000..bd3a3e6f8c0b --- /dev/null +++ b/nova/db/sqlalchemy/migrate_repo/versions/002_bexar.py @@ -0,0 +1,209 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from sqlalchemy import * +from migrate import * + +from nova import log as logging + + +meta = MetaData() + + +# Just for the ForeignKey and column creation to succeed, these are not the +# actual definitions of instances, services, or networks. 
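+# (These stubs give SQLAlchemy just enough metadata to resolve the
+# ForeignKey references and new columns below without repeating the full
+# schemas from 001_austin.)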
+instances = Table('instances', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +services = Table('services', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +networks = Table('networks', meta, + Column('id', Integer(), primary_key=True, nullable=False), + ) + + +# +# New Tables +# +certificates = Table('certificates', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('user_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('project_id', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('file_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +consoles = Table('consoles', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_name', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('instance_id', Integer()), + Column('password', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('port', Integer(), nullable=True), + Column('pool_id', + Integer(), + ForeignKey('console_pools.id')), + ) + + +console_pools = Table('console_pools', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('address', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('username', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('password', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('console_type', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('public_hostname', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('compute_host', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +instance_actions = Table('instance_actions', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('id', Integer(), primary_key=True, nullable=False), + Column('instance_id', + Integer(), + ForeignKey('instances.id')), + Column('action', + 
String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('error', + Text(length=None, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +# +# Tables to alter +# +auth_tokens = Table('auth_tokens', meta, + Column('created_at', DateTime(timezone=False)), + Column('updated_at', DateTime(timezone=False)), + Column('deleted_at', DateTime(timezone=False)), + Column('deleted', Boolean(create_constraint=True, name=None)), + Column('token_hash', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False), + primary_key=True, + nullable=False), + Column('user_id', Integer()), + Column('server_management_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('storage_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + Column('cdn_management_url', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)), + ) + + +instances_availability_zone = Column( + 'availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + +instances_locked = Column('locked', + Boolean(create_constraint=True, name=None)) + + +networks_cidr_v6 = Column( + 'cidr_v6', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + +networks_ra_server = Column( + 'ra_server', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + +services_availability_zone = Column( + 'availability_zone', + String(length=255, convert_unicode=False, assert_unicode=None, + unicode_error=None, _warn_on_bytestring=False)) + + +def upgrade(migrate_engine): + # Upgrade operations go here. Don't create your own engine; + # bind migrate_engine to your metadata + meta.bind = migrate_engine + for table in (certificates, consoles, console_pools, instance_actions): + try: + table.create() + except Exception: + logging.info(repr(table)) + logging.exception('Exception while creating table') + raise + + auth_tokens.c.user_id.alter(type=String(length=255, + convert_unicode=False, + assert_unicode=None, + unicode_error=None, + _warn_on_bytestring=False)) + + instances.create_column(instances_availability_zone) + instances.create_column(instances_locked) + networks.create_column(networks_cidr_v6) + networks.create_column(networks_ra_server) + services.create_column(services_availability_zone) diff --git a/nova/db/sqlalchemy/migrate_repo/versions/__init__.py b/nova/db/sqlalchemy/migrate_repo/versions/__init__.py new file mode 100644 index 000000000000..e69de29bb2d1 diff --git a/nova/db/sqlalchemy/migration.py b/nova/db/sqlalchemy/migration.py new file mode 100644 index 000000000000..33d14827bfb8 --- /dev/null +++ b/nova/db/sqlalchemy/migration.py @@ -0,0 +1,72 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from nova import flags + +import sqlalchemy +from migrate.versioning import api as versioning_api +from migrate.versioning import exceptions as versioning_exceptions + +FLAGS = flags.FLAGS + + +def db_sync(version=None): + db_version() + repo_path = _find_migrate_repo() + return versioning_api.upgrade(FLAGS.sql_connection, repo_path, version) + + +def db_version(): + repo_path = _find_migrate_repo() + try: + return versioning_api.db_version(FLAGS.sql_connection, repo_path) + except versioning_exceptions.DatabaseNotControlledError: + # If we aren't version controlled we may already have the database + # in the state from before we started version control, check for that + # and set up version_control appropriately + meta = sqlalchemy.MetaData() + engine = sqlalchemy.create_engine(FLAGS.sql_connection, echo=False) + meta.reflect(bind=engine) + try: + for table in ('auth_tokens', 'export_devices', 'fixed_ips', + 'floating_ips', 'instances', 'iscsi_targets', + 'key_pairs', 'networks', 'projects', 'quotas', + 'security_group_rules', + 'security_group_instance_association', 'services', + 'users', 'user_project_association', + 'user_project_role_association', 'volumes'): + assert table in meta.tables + return db_version_control(1) + except AssertionError: + return db_version_control(0) + + +def db_version_control(version=None): + repo_path = _find_migrate_repo() + versioning_api.version_control(FLAGS.sql_connection, repo_path, version) + return version + + +def _find_migrate_repo(): + """Get the path for the migrate repository.""" + path = os.path.join(os.path.abspath(os.path.dirname(__file__)), + 'migrate_repo') + assert os.path.exists(path) + return path diff --git a/nova/db/sqlalchemy/models.py b/nova/db/sqlalchemy/models.py index 62bb1780d218..c54ebe3ba588 100644 --- a/nova/db/sqlalchemy/models.py +++ b/nova/db/sqlalchemy/models.py @@ -90,53 +90,14 @@ class NovaBase(object): setattr(self, k, v) def iteritems(self): - """Make the model object behave like a dict""" - return iter(self) + """Make the model object behave like a dict. 
- -# TODO(vish): Store images in the database instead of file system -#class Image(BASE, NovaBase): -# """Represents an image in the datastore""" -# __tablename__ = 'images' -# id = Column(Integer, primary_key=True) -# ec2_id = Column(String(12), unique=True) -# user_id = Column(String(255)) -# project_id = Column(String(255)) -# image_type = Column(String(255)) -# public = Column(Boolean, default=False) -# state = Column(String(255)) -# location = Column(String(255)) -# arch = Column(String(255)) -# default_kernel_id = Column(String(255)) -# default_ramdisk_id = Column(String(255)) -# -# @validates('image_type') -# def validate_image_type(self, key, image_type): -# assert(image_type in ['machine', 'kernel', 'ramdisk', 'raw']) -# -# @validates('state') -# def validate_state(self, key, state): -# assert(state in ['available', 'pending', 'disabled']) -# -# @validates('default_kernel_id') -# def validate_kernel_id(self, key, val): -# if val != 'machine': -# assert(val is None) -# -# @validates('default_ramdisk_id') -# def validate_ramdisk_id(self, key, val): -# if val != 'machine': -# assert(val is None) -# -# -# TODO(vish): To make this into its own table, we need a good place to -# create the host entries. In config somwhere? Or the first -# time any object sets host? This only becomes particularly -# important if we need to store per-host data. -#class Host(BASE, NovaBase): -# """Represents a host where services are running""" -# __tablename__ = 'hosts' -# id = Column(String(255), primary_key=True) + Includes attributes from joins.""" + local = dict(self) + joined = dict([(k, v) for k, v in self.__dict__.iteritems() + if not k[0] == '_']) + local.update(joined) + return local.iteritems() class Service(BASE, NovaBase): @@ -149,6 +110,7 @@ class Service(BASE, NovaBase): topic = Column(String(255)) report_count = Column(Integer, nullable=False, default=0) disabled = Column(Boolean, default=False) + availability_zone = Column(String(255), default='nova') class Certificate(BASE, NovaBase): @@ -168,7 +130,7 @@ class Instance(BASE, NovaBase): @property def name(self): - return "instance-%08x" % self.id + return FLAGS.instance_name_template % self.id admin_pass = Column(String(255)) user_id = Column(String(255)) @@ -224,6 +186,8 @@ class Instance(BASE, NovaBase): display_name = Column(String(255)) display_description = Column(String(255)) + locked = Column(Boolean) + # TODO(vish): see Ewan's email about state improvements, probably # should be in a driver base class or some such # vmstate_state = running, halted, suspended, paused @@ -253,7 +217,7 @@ class Volume(BASE, NovaBase): @property def name(self): - return "volume-%08x" % self.id + return FLAGS.volume_name_template % self.id user_id = Column(String(255)) project_id = Column(String(255)) @@ -408,6 +372,10 @@ class Network(BASE, NovaBase): injected = Column(Boolean, default=False) cidr = Column(String(255), unique=True) + cidr_v6 = Column(String(255), unique=True) + + ra_server = Column(String(255)) + netmask = Column(String(255)) bridge = Column(String(255)) gateway = Column(String(255)) @@ -538,6 +506,31 @@ class FloatingIp(BASE, NovaBase): host = Column(String(255)) # , ForeignKey('hosts.id')) +class ConsolePool(BASE, NovaBase): + """Represents pool of consoles on the same physical node.""" + __tablename__ = 'console_pools' + id = Column(Integer, primary_key=True) + address = Column(String(255)) + username = Column(String(255)) + password = Column(String(255)) + console_type = Column(String(255)) + public_hostname = Column(String(255)) + 
host = Column(String(255)) + compute_host = Column(String(255)) + + +class Console(BASE, NovaBase): + """Represents a console session for an instance.""" + __tablename__ = 'consoles' + id = Column(Integer, primary_key=True) + instance_name = Column(String(255)) + instance_id = Column(Integer) + password = Column(String(255)) + port = Column(Integer, nullable=True) + pool_id = Column(Integer, ForeignKey('console_pools.id')) + pool = relationship(ConsolePool, backref=backref('consoles')) + + def register_models(): """Register Models and create metadata. @@ -550,7 +543,7 @@ def register_models(): Volume, ExportDevice, IscsiTarget, FixedIp, FloatingIp, Network, SecurityGroup, SecurityGroupIngressRule, SecurityGroupInstanceAssociation, AuthToken, User, - Project, Certificate) # , Image, Host + Project, Certificate, ConsolePool, Console) # , Image, Host engine = create_engine(FLAGS.sql_connection, echo=False) for model in models: model.metadata.create_all(engine) diff --git a/nova/exception.py b/nova/exception.py index 277033e0f324..ecd814e5d23f 100644 --- a/nova/exception.py +++ b/nova/exception.py @@ -21,9 +21,8 @@ Nova base exception handling, including decorator for re-raising Nova-type exceptions. SHOULD include dedicated exception logging. """ -import logging -import sys -import traceback +from nova import log as logging +LOG = logging.getLogger('nova.exception') class ProcessExecutionError(IOError): @@ -77,6 +76,10 @@ class InvalidInputException(Error): pass +class TimeoutException(Error): + pass + + def wrap_exception(f): def _wrap(*args, **kw): try: @@ -84,7 +87,7 @@ def wrap_exception(f): except Exception, e: if not isinstance(e, Error): #exc_type, exc_value, exc_traceback = sys.exc_info() - logging.exception(_('Uncaught exception')) + LOG.exception(_('Uncaught exception')) #logging.error(traceback.extract_stack(exc_traceback)) raise Error(str(e)) raise diff --git a/nova/fakerabbit.py b/nova/fakerabbit.py index 79d8b894d778..7c2d7177be17 100644 --- a/nova/fakerabbit.py +++ b/nova/fakerabbit.py @@ -18,12 +18,16 @@ """Based a bit on the carrot.backeds.queue backend... 
but a lot better.""" -import logging import Queue as queue from carrot.backends import base from eventlet import greenthread +from nova import log as logging + + +LOG = logging.getLogger("nova.fakerabbit") + EXCHANGES = {} QUEUES = {} @@ -41,12 +45,12 @@ class Exchange(object): self._routes = {} def publish(self, message, routing_key=None): - logging.debug(_('(%s) publish (key: %s) %s'), - self.name, routing_key, message) + LOG.debug(_('(%s) publish (key: %s) %s'), + self.name, routing_key, message) routing_key = routing_key.split('.')[0] if routing_key in self._routes: for f in self._routes[routing_key]: - logging.debug(_('Publishing to route %s'), f) + LOG.debug(_('Publishing to route %s'), f) f(message, routing_key=routing_key) def bind(self, callback, routing_key): @@ -76,19 +80,19 @@ class Backend(base.BaseBackend): def queue_declare(self, queue, **kwargs): global QUEUES if queue not in QUEUES: - logging.debug(_('Declaring queue %s'), queue) + LOG.debug(_('Declaring queue %s'), queue) QUEUES[queue] = Queue(queue) def exchange_declare(self, exchange, type, *args, **kwargs): global EXCHANGES if exchange not in EXCHANGES: - logging.debug(_('Declaring exchange %s'), exchange) + LOG.debug(_('Declaring exchange %s'), exchange) EXCHANGES[exchange] = Exchange(exchange, type) def queue_bind(self, queue, exchange, routing_key, **kwargs): global EXCHANGES global QUEUES - logging.debug(_('Binding %s to %s with key %s'), + LOG.debug(_('Binding %s to %s with key %s'), queue, exchange, routing_key) EXCHANGES[exchange].bind(QUEUES[queue].push, routing_key) @@ -113,7 +117,7 @@ class Backend(base.BaseBackend): content_type=content_type, content_encoding=content_encoding) message.result = True - logging.debug(_('Getting from %s: %s'), queue, message) + LOG.debug(_('Getting from %s: %s'), queue, message) return message def prepare_message(self, message_data, delivery_mode, diff --git a/nova/flags.py b/nova/flags.py index 4b73349270a3..81e2e36f99ae 100644 --- a/nova/flags.py +++ b/nova/flags.py @@ -29,8 +29,6 @@ import sys import gflags -from nova import utils - class FlagValues(gflags.FlagValues): """Extension of gflags.FlagValues that allows undefined and runtime flags. 
@@ -202,10 +200,22 @@ def DECLARE(name, module_string, flag_values=FLAGS): "%s not defined by %s" % (name, module_string)) +def _get_my_ip(): + """Returns the actual ip of the local machine.""" + try: + csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + csock.connect(('8.8.8.8', 80)) + (addr, port) = csock.getsockname() + csock.close() + return addr + except socket.gaierror as ex: + return "127.0.0.1" + + # __GLOBAL FLAGS ONLY__ # Define any app-specific flags in their own files, docs at: -# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39 - +# http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#a9 +DEFINE_string('my_ip', _get_my_ip(), 'host ip address') DEFINE_list('region_list', [], 'list of region=url pairs separated by commas') @@ -213,16 +223,25 @@ DEFINE_string('connection_type', 'libvirt', 'libvirt, xenapi or fake') DEFINE_string('aws_access_key_id', 'admin', 'AWS Access ID') DEFINE_string('aws_secret_access_key', 'admin', 'AWS Access Key') DEFINE_integer('glance_port', 9292, 'glance port') -DEFINE_string('glance_host', utils.get_my_ip(), 'glance host') +DEFINE_string('glance_host', '$my_ip', 'glance host') DEFINE_integer('s3_port', 3333, 's3 port') -DEFINE_string('s3_host', utils.get_my_ip(), 's3 host (for infrastructure)') -DEFINE_string('s3_dmz', utils.get_my_ip(), 's3 dmz ip (for instances)') +DEFINE_string('s3_host', '$my_ip', 's3 host (for infrastructure)') +DEFINE_string('s3_dmz', '$my_ip', 's3 dmz ip (for instances)') DEFINE_string('compute_topic', 'compute', 'the topic compute nodes listen on') +DEFINE_string('console_topic', 'console', + 'the topic console proxy nodes listen on') DEFINE_string('scheduler_topic', 'scheduler', 'the topic scheduler nodes listen on') DEFINE_string('volume_topic', 'volume', 'the topic volume nodes listen on') DEFINE_string('network_topic', 'network', 'the topic network nodes listen on') - +DEFINE_string('ajax_console_proxy_topic', 'ajax_proxy', + 'the topic ajax proxy nodes listen on') +DEFINE_string('ajax_console_proxy_url', + 'http://127.0.0.1:8000', + 'location of ajax console proxy, \ + in the form "http://127.0.0.1:8000"') +DEFINE_integer('ajax_console_proxy_port', + 8000, 'port that ajax_console_proxy binds') DEFINE_bool('verbose', False, 'show debug output') DEFINE_boolean('fake_rabbit', False, 'use a fake rabbit') DEFINE_bool('fake_network', False, @@ -235,11 +254,15 @@ DEFINE_string('rabbit_virtual_host', '/', 'rabbit virtual host') DEFINE_integer('rabbit_retry_interval', 10, 'rabbit connection retry interval') DEFINE_integer('rabbit_max_retries', 12, 'rabbit connection attempts') DEFINE_string('control_exchange', 'nova', 'the main exchange to connect to') -DEFINE_string('ec2_prefix', 'http', 'prefix for ec2') -DEFINE_string('cc_host', utils.get_my_ip(), 'ip of api server') -DEFINE_string('cc_dmz', utils.get_my_ip(), 'internal ip of api server') -DEFINE_integer('cc_port', 8773, 'cloud controller port') -DEFINE_string('ec2_suffix', '/services/Cloud', 'suffix for ec2') +DEFINE_string('ec2_host', '$my_ip', 'ip of api server') +DEFINE_string('ec2_dmz_host', '$my_ip', 'internal ip of api server') +DEFINE_integer('ec2_port', 8773, 'cloud controller port') +DEFINE_string('ec2_scheme', 'http', 'prefix for ec2') +DEFINE_string('ec2_path', '/services/Cloud', 'suffix for ec2') +DEFINE_string('osapi_host', '$my_ip', 'ip of api server') +DEFINE_string('osapi_scheme', 'http', 'prefix for openstack') +DEFINE_integer('osapi_port', 8774, 'OpenStack API port') +DEFINE_string('osapi_path', '/v1.0/', 'suffix 
for openstack') DEFINE_string('default_project', 'openstack', 'default project for openstack') DEFINE_string('default_image', 'ami-11111', @@ -271,6 +294,8 @@ DEFINE_integer('sql_retry_interval', 10, 'sql connection retry interval') DEFINE_string('compute_manager', 'nova.compute.manager.ComputeManager', 'Manager for compute') +DEFINE_string('console_manager', 'nova.console.manager.ConsoleProxyManager', + 'Manager for console proxy') DEFINE_string('network_manager', 'nova.network.manager.VlanManager', 'Manager for network') DEFINE_string('volume_manager', 'nova.volume.manager.VolumeManager', @@ -285,6 +310,5 @@ DEFINE_string('image_service', 'nova.image.s3.S3ImageService', DEFINE_string('host', socket.gethostname(), 'name of this node') -# UNUSED DEFINE_string('node_availability_zone', 'nova', 'availability zone of this node') diff --git a/nova/image/glance.py b/nova/image/glance.py index cb3936df1172..593c4bce6f3e 100644 --- a/nova/image/glance.py +++ b/nova/image/glance.py @@ -14,183 +14,50 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - """Implementation of an image service that uses Glance as the backend""" +from __future__ import absolute_import import httplib import json -import logging import urlparse -import webob.exc - -from nova import utils -from nova import flags from nova import exception -import nova.image.service +from nova import flags +from nova import log as logging +from nova import utils +from nova.image import service + + +LOG = logging.getLogger('nova.image.glance') FLAGS = flags.FLAGS - -flags.DEFINE_string('glance_teller_address', 'http://127.0.0.1', - 'IP address or URL where Glance\'s Teller service resides') -flags.DEFINE_string('glance_teller_port', '9191', - 'Port for Glance\'s Teller service') -flags.DEFINE_string('glance_parallax_address', 'http://127.0.0.1', - 'IP address or URL where Glance\'s Parallax service ' - 'resides') -flags.DEFINE_string('glance_parallax_port', '9292', - 'Port for Glance\'s Parallax service') +GlanceClient = utils.import_class('glance.client.Client') -class TellerClient(object): - - def __init__(self): - self.address = FLAGS.glance_teller_address - self.port = FLAGS.glance_teller_port - url = urlparse.urlparse(self.address) - self.netloc = url.netloc - self.connection_type = {'http': httplib.HTTPConnection, - 'https': httplib.HTTPSConnection}[url.scheme] - - -class ParallaxClient(object): - - def __init__(self): - self.address = FLAGS.glance_parallax_address - self.port = FLAGS.glance_parallax_port - url = urlparse.urlparse(self.address) - self.netloc = url.netloc - self.connection_type = {'http': httplib.HTTPConnection, - 'https': httplib.HTTPSConnection}[url.scheme] - - def get_image_index(self): - """ - Returns a list of image id/name mappings from Parallax - """ - try: - c = self.connection_type(self.netloc, self.port) - c.request("GET", "images") - res = c.getresponse() - if res.status == 200: - # Parallax returns a JSONified dict(images=image_list) - data = json.loads(res.read())['images'] - return data - else: - logging.warn(_("Parallax returned HTTP error %d from " - "request for /images"), res.status_int) - return [] - finally: - c.close() - - def get_image_details(self): - """ - Returns a list of detailed image data mappings from Parallax - """ - try: - c = self.connection_type(self.netloc, self.port) - c.request("GET", "images/detail") - res = c.getresponse() - if res.status == 200: - # Parallax 
returns a JSONified dict(images=image_list) - data = json.loads(res.read())['images'] - return data - else: - logging.warn(_("Parallax returned HTTP error %d from " - "request for /images/detail"), res.status_int) - return [] - finally: - c.close() - - def get_image_metadata(self, image_id): - """ - Returns a mapping of image metadata from Parallax - """ - try: - c = self.connection_type(self.netloc, self.port) - c.request("GET", "images/%s" % image_id) - res = c.getresponse() - if res.status == 200: - # Parallax returns a JSONified dict(image=image_info) - data = json.loads(res.read())['image'] - return data - else: - # TODO(jaypipes): log the error? - return None - finally: - c.close() - - def add_image_metadata(self, image_metadata): - """ - Tells parallax about an image's metadata - """ - try: - c = self.connection_type(self.netloc, self.port) - body = json.dumps(image_metadata) - c.request("POST", "images", body) - res = c.getresponse() - if res.status == 200: - # Parallax returns a JSONified dict(image=image_info) - data = json.loads(res.read())['image'] - return data['id'] - else: - # TODO(jaypipes): log the error? - return None - finally: - c.close() - - def update_image_metadata(self, image_id, image_metadata): - """ - Updates Parallax's information about an image - """ - try: - c = self.connection_type(self.netloc, self.port) - body = json.dumps(image_metadata) - c.request("PUT", "images/%s" % image_id, body) - res = c.getresponse() - return res.status == 200 - finally: - c.close() - - def delete_image_metadata(self, image_id): - """ - Deletes Parallax's information about an image - """ - try: - c = self.connection_type(self.netloc, self.port) - c.request("DELETE", "images/%s" % image_id) - res = c.getresponse() - return res.status == 200 - finally: - c.close() - - -class GlanceImageService(nova.image.service.BaseImageService): +class GlanceImageService(service.BaseImageService): """Provides storage and retrieval of disk image objects within Glance.""" def __init__(self): - self.teller = TellerClient() - self.parallax = ParallaxClient() + self.client = GlanceClient(FLAGS.glance_host, FLAGS.glance_port) def index(self, context): """ - Calls out to Parallax for a list of images available + Calls out to Glance for a list of images available """ - images = self.parallax.get_image_index() - return images + return self.client.get_images() def detail(self, context): """ - Calls out to Parallax for a list of detailed image information + Calls out to Glance for a list of detailed image information """ - images = self.parallax.get_image_details() - return images + return self.client.get_images_detailed() def show(self, context, id): """ Returns a dict containing image data for the given opaque image id. """ - image = self.parallax.get_image_metadata(id) + image = self.client.get_image_meta(id) if image: return image raise exception.NotFound @@ -202,7 +69,7 @@ class GlanceImageService(nova.image.service.BaseImageService): :raises AlreadyExists if the image already exist. """ - return self.parallax.add_image_metadata(data) + return self.client.add_image(image_meta=data) def update(self, context, image_id, data): """Replace the contents of the given image with the new data. @@ -210,7 +77,7 @@ class GlanceImageService(nova.image.service.BaseImageService): :raises NotFound if the image does not exist. 
""" - self.parallax.update_image_metadata(image_id, data) + return self.client.update_image(image_id, data) def delete(self, context, image_id): """ @@ -219,7 +86,7 @@ class GlanceImageService(nova.image.service.BaseImageService): :raises NotFound if the image does not exist. """ - self.parallax.delete_image_metadata(image_id) + return self.client.delete_image(image_id) def delete_all(self): """ diff --git a/nova/log.py b/nova/log.py new file mode 100644 index 000000000000..4997d3f28329 --- /dev/null +++ b/nova/log.py @@ -0,0 +1,256 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Nova logging handler. + +This module adds to logging functionality by adding the option to specify +a context object when calling the various log methods. If the context object +is not specified, default formatting is used. + +It also allows setting of formatting information through flags. +""" + + +import cStringIO +import json +import logging +import logging.handlers +import traceback + +from nova import flags +from nova import version + + +FLAGS = flags.FLAGS + +flags.DEFINE_string('logging_context_format_string', + '(%(name)s %(nova_version)s): %(levelname)s ' + '[%(request_id)s %(user)s ' + '%(project)s] %(message)s', + 'format string to use for log messages') + +flags.DEFINE_string('logging_default_format_string', + '(%(name)s %(nova_version)s): %(levelname)s [N/A] ' + '%(message)s', + 'format string to use for log messages') + +flags.DEFINE_string('logging_debug_format_suffix', + 'from %(processName)s (pid=%(process)d) %(funcName)s' + ' %(pathname)s:%(lineno)d', + 'data to append to log format when level is DEBUG') + +flags.DEFINE_string('logging_exception_prefix', + '(%(name)s): TRACE: ', + 'prefix each line of exception output with this format') + +flags.DEFINE_list('default_log_levels', + ['amqplib=WARN', + 'sqlalchemy=WARN', + 'eventlet.wsgi.server=WARN'], + 'list of logger=LEVEL pairs') + +flags.DEFINE_bool('use_syslog', False, 'output to syslog') +flags.DEFINE_string('logfile', None, 'output to named file') + + +# A list of things we want to replicate from logging. +# levels +CRITICAL = logging.CRITICAL +FATAL = logging.FATAL +ERROR = logging.ERROR +WARNING = logging.WARNING +WARN = logging.WARN +INFO = logging.INFO +DEBUG = logging.DEBUG +NOTSET = logging.NOTSET +# methods +getLogger = logging.getLogger +debug = logging.debug +info = logging.info +warning = logging.warning +warn = logging.warn +error = logging.error +exception = logging.exception +critical = logging.critical +log = logging.log +# handlers +StreamHandler = logging.StreamHandler +FileHandler = logging.FileHandler +# logging.SysLogHandler is nicer than logging.logging.handler.SysLogHandler. 
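+# (aliased so code that does "from nova import log as logging" can keep
+# using logging.SysLogHandler without also importing the stdlib module)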
+SysLogHandler = logging.handlers.SysLogHandler + + +# our new audit level +AUDIT = logging.INFO + 1 +logging.addLevelName(AUDIT, 'AUDIT') + + +def _dictify_context(context): + if context is None: + return None + if not isinstance(context, dict) \ + and getattr(context, 'to_dict', None): + context = context.to_dict() + return context + + +def basicConfig(): + logging.basicConfig() + for handler in logging.root.handlers: + handler.setFormatter(_formatter) + if FLAGS.verbose: + logging.root.setLevel(logging.DEBUG) + else: + logging.root.setLevel(logging.INFO) + if FLAGS.use_syslog: + syslog = SysLogHandler(address='/dev/log') + syslog.setFormatter(_formatter) + logging.root.addHandler(syslog) + if FLAGS.logfile: + logfile = FileHandler(FLAGS.logfile) + logfile.setFormatter(_formatter) + logging.root.addHandler(logfile) + + +class NovaLogger(logging.Logger): + """ + NovaLogger manages request context and formatting. + + This becomes the class that is instantiated by logging.getLogger. + """ + def __init__(self, name, level=NOTSET): + level_name = self._get_level_from_flags(name, FLAGS) + level = globals()[level_name] + logging.Logger.__init__(self, name, level) + + def _get_level_from_flags(self, name, FLAGS): + # if exactly "nova", or a child logger, honor the verbose flag + if (name == "nova" or name.startswith("nova.")) and FLAGS.verbose: + return 'DEBUG' + for pair in FLAGS.default_log_levels: + logger, _sep, level = pair.partition('=') + # NOTE(todd): if we set a.b, we want a.b.c to have the same level + # (but not a.bc, so we check the dot) + if name == logger: + return level + if name.startswith(logger) and name[len(logger)] == '.': + return level + return 'INFO' + + def _log(self, level, msg, args, exc_info=None, extra=None, context=None): + """Extract context from any log call""" + if not extra: + extra = {} + if context: + extra.update(_dictify_context(context)) + extra.update({"nova_version": version.version_string_with_vcs()}) + logging.Logger._log(self, level, msg, args, exc_info, extra) + + def addHandler(self, handler): + """Each handler gets our custom formatter""" + handler.setFormatter(_formatter) + logging.Logger.addHandler(self, handler) + + def audit(self, msg, *args, **kwargs): + """Shortcut for our AUDIT level""" + if self.isEnabledFor(AUDIT): + self._log(AUDIT, msg, args, **kwargs) + + def exception(self, msg, *args, **kwargs): + """Logging.exception doesn't handle kwargs, so breaks context""" + if not kwargs.get('exc_info'): + kwargs['exc_info'] = 1 + self.error(msg, *args, **kwargs) + # NOTE(todd): does this really go here, or in _log ? + extra = kwargs.get('extra') + if not extra: + return + env = extra.get('environment') + if env: + env = env.copy() + for k in env.keys(): + if not isinstance(env[k], str): + env.pop(k) + message = "Environment: %s" % json.dumps(env) + kwargs.pop('exc_info') + self.error(message, **kwargs) + +logging.setLoggerClass(NovaLogger) + + +class NovaRootLogger(NovaLogger): + pass + +if not isinstance(logging.root, NovaRootLogger): + logging.root = NovaRootLogger("nova.root", WARNING) + NovaLogger.root = logging.root + NovaLogger.manager.root = logging.root + + +class NovaFormatter(logging.Formatter): + """ + A nova.context.RequestContext aware formatter configured through flags. + + The flags used to set format strings are: logging_context_format_string + and logging_default_format_string. You can also specify + logging_debug_format_suffix to append extra formatting if the log level is + debug.
+ + For information about what variables are available for the formatter see: + http://docs.python.org/library/logging.html#formatter + """ + + def format(self, record): + """Uses the context format string if request_id is set, else default""" + if record.__dict__.get('request_id', None): + self._fmt = FLAGS.logging_context_format_string + else: + self._fmt = FLAGS.logging_default_format_string + if record.levelno == logging.DEBUG \ + and FLAGS.logging_debug_format_suffix: + self._fmt += " " + FLAGS.logging_debug_format_suffix + # Cache this on the record, Logger will respect our formatted copy + if record.exc_info: + record.exc_text = self.formatException(record.exc_info, record) + return logging.Formatter.format(self, record) + + def formatException(self, exc_info, record=None): + """Format exception output with FLAGS.logging_exception_prefix""" + if not record: + return logging.Formatter.formatException(self, exc_info) + stringbuffer = cStringIO.StringIO() + traceback.print_exception(exc_info[0], exc_info[1], exc_info[2], + None, stringbuffer) + lines = stringbuffer.getvalue().split("\n") + stringbuffer.close() + formatted_lines = [] + for line in lines: + pl = FLAGS.logging_exception_prefix % record.__dict__ + fl = "%s%s" % (pl, line) + formatted_lines.append(fl) + return "\n".join(formatted_lines) + +_formatter = NovaFormatter() + + +def audit(msg, *args, **kwargs): + """Shortcut for logging to root log with severity 'AUDIT'.""" + if len(logging.root.handlers) == 0: + basicConfig() + logging.root.log(AUDIT, msg, *args, **kwargs) diff --git a/nova/network/api.py b/nova/network/api.py index cbd912047cab..bf43acb519f5 100644 --- a/nova/network/api.py +++ b/nova/network/api.py @@ -20,15 +20,15 @@ Handles all requests relating to instances (guest vms). """ -import logging - from nova import db from nova import flags +from nova import log as logging from nova import quota from nova import rpc from nova.db import base FLAGS = flags.FLAGS +LOG = logging.getLogger('nova.network') class API(base.Base): @@ -36,7 +36,7 @@ class API(base.Base): def allocate_floating_ip(self, context): if quota.allowed_floating_ips(context, 1) < 1: - logging.warn(_("Quota exceeeded for %s, tried to allocate " + LOG.warn(_("Quota exceeded for %s, tried to allocate " "address"), context.project_id) raise quota.QuotaError(_("Address quota exceeded. You cannot " diff --git a/nova/network/linux_net.py b/nova/network/linux_net.py index 931a89554683..d29e17603c9d 100644 --- a/nova/network/linux_net.py +++ b/nova/network/linux_net.py @@ -17,16 +17,17 @@ Implements vlans, bridges, and iptables rules using linux utilities. """ -import logging import os -# TODO(ja): does the definition of network_path belong here?
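The nova.log module above changes the logging idiom for the rest of this patch: modules build a named logger once and pass the request context through, and NovaFormatter switches to the context-aware format string whenever the record carries a request_id. A standalone sketch of the calling pattern (not part of the patch; FakeContext is an invented stand-in for nova.context.RequestContext, and the printed line is approximate):

    import sys

    from nova import flags
    from nova import log as logging

    flags.FLAGS(sys.argv)    # parse flag defaults, as the nova binaries do
    logging.basicConfig()    # nova's wrapper: wires NovaFormatter to root


    class FakeContext(object):
        """Stand-in for nova.context.RequestContext (to_dict() is enough)."""

        def to_dict(self):
            return {'request_id': 'req-1f2e3d', 'user': 'demo',
                    'project': 'proj1'}


    LOG = logging.getLogger('nova.network')
    # Renders via logging_context_format_string, roughly:
    #   (nova.network 2011.1-...): AUDIT [req-1f2e3d demo proj1] Leasing IP 10.0.0.3
    LOG.audit('Leasing IP %s', '10.0.0.3', context=FakeContext())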
- from nova import db from nova import flags +from nova import log as logging from nova import utils +LOG = logging.getLogger("nova.linux_net") + + def _bin_file(script): """Return the absolute path to script in the bin directory""" return os.path.abspath(os.path.join(__file__, "../../../bin", script)) @@ -45,10 +46,11 @@ flags.DEFINE_string('vlan_interface', 'eth0', 'network device for vlans') flags.DEFINE_string('dhcpbridge', _bin_file('nova-dhcpbridge'), 'location of nova-dhcpbridge') -flags.DEFINE_string('routing_source_ip', utils.get_my_ip(), +flags.DEFINE_string('routing_source_ip', '$my_ip', 'Public IP of network host') flags.DEFINE_bool('use_nova_chains', False, 'use the nova_ routing chains instead of default') + flags.DEFINE_string('dns_server', None, 'if set, uses specific dns server for dnsmasq') flags.DEFINE_string('dmz_cidr', '10.128.0.0/24', @@ -59,7 +61,7 @@ def metadata_forward(): """Create forwarding rule for metadata""" _confirm_rule("PREROUTING", "-t nat -s 0.0.0.0/0 " "-d 169.254.169.254/32 -p tcp -m tcp --dport 80 -j DNAT " - "--to-destination %s:%s" % (FLAGS.cc_dmz, FLAGS.cc_port)) + "--to-destination %s:%s" % (FLAGS.ec2_dmz_host, FLAGS.ec2_port)) def init_host(): @@ -172,7 +174,7 @@ def ensure_vlan(vlan_num): """Create a vlan unless it already exists""" interface = "vlan%s" % vlan_num if not _device_exists(interface): - logging.debug(_("Starting VLAN inteface %s"), interface) + LOG.debug(_("Starting VLAN interface %s"), interface) _execute("sudo vconfig set_name_type VLAN_PLUS_VID_NO_PAD") _execute("sudo vconfig add %s %s" % (FLAGS.vlan_interface, vlan_num)) _execute("sudo ifconfig %s up" % interface) @@ -182,7 +184,7 @@ def ensure_bridge(bridge, interface, net_attrs=None): """Create a bridge unless it already exists""" if not _device_exists(bridge): - logging.debug(_("Starting Bridge interface for %s"), interface) + LOG.debug(_("Starting Bridge interface for %s"), interface) _execute("sudo brctl addbr %s" % bridge) _execute("sudo brctl setfd %s 0" % bridge) # _execute("sudo brctl setageing %s 10" % bridge) @@ -195,6 +197,10 @@ def ensure_bridge(bridge, interface, net_attrs=None): net_attrs['gateway'], net_attrs['broadcast'], net_attrs['netmask'])) + if(FLAGS.use_ipv6): + _execute("sudo ifconfig %s add %s up" % \ + (bridge, + net_attrs['cidr_v6'])) else: _execute("sudo ifconfig %s up" % bridge) if FLAGS.use_nova_chains: @@ -208,6 +214,8 @@ def ensure_bridge(bridge, interface, net_attrs=None): _confirm_rule("FORWARD", "--in-interface %s -j ACCEPT" % bridge) _confirm_rule("FORWARD", "--out-interface %s -j ACCEPT" % bridge) + _execute("sudo iptables -N nova-local", check_exit_code=False) + _confirm_rule("FORWARD", "-j nova-local") def get_dhcp_hosts(context, network_id): @@ -248,9 +256,9 @@ def update_dhcp(context, network_id): _execute('sudo kill -HUP %d' % pid) return except Exception as exc: # pylint: disable-msg=W0703 - logging.debug(_("Hupping dnsmasq threw %s"), exc) + LOG.debug(_("Hupping dnsmasq threw %s"), exc) else: - logging.debug(_("Pid %d is stale, relaunching dnsmasq"), pid) + LOG.debug(_("Pid %d is stale, relaunching dnsmasq"), pid) # FLAGFILE and DNSMASQ_INTERFACE in env env = {'FLAGFILE': FLAGS.dhcpbridge_flagfile, @@ -259,6 +267,50 @@ def update_dhcp(context, network_id): _execute(command, addl_env=env) +def update_ra(context, network_id): + network_ref = db.network_get(context, network_id) + + conffile = _ra_file(network_ref['bridge'], 'conf') + with open(conffile, 'w') as f: + conf_str = """ +interface %s +{ + AdvSendAdvert
on; + MinRtrAdvInterval 3; + MaxRtrAdvInterval 10; + prefix %s + { + AdvOnLink on; + AdvAutonomous on; + }; +}; +""" % (network_ref['bridge'], network_ref['cidr_v6']) + f.write(conf_str) + + # Make sure radvd can actually read it (it setuid()s to "nobody") + os.chmod(conffile, 0644) + + pid = _ra_pid_for(network_ref['bridge']) + + # if radvd is already running, then tell it to reload + if pid: + out, _err = _execute('cat /proc/%d/cmdline' + % pid, check_exit_code=False) + if conffile in out: + try: + _execute('sudo kill -HUP %d' % pid) + return + except Exception as exc: # pylint: disable-msg=W0703 + LOG.debug(_("Hupping radvd threw %s"), exc) + else: + LOG.debug(_("Pid %d is stale, relaunching radvd"), pid) + command = _ra_cmd(network_ref) + _execute(command) + db.network_update(context, network_id, + {"ra_server": + utils.get_my_linklocal(network_ref['bridge'])}) + + def _host_dhcp(fixed_ip_ref): """Return a host string for an address""" instance_ref = fixed_ip_ref['instance'] @@ -270,7 +322,7 @@ def _host_dhcp(fixed_ip_ref): def _execute(cmd, *args, **kwargs): """Wrapper around utils._execute for fake_network""" if FLAGS.fake_network: - logging.debug("FAKE NET: %s", cmd) + LOG.debug("FAKE NET: %s", cmd) return "fake", 0 else: return utils.execute(cmd, *args, **kwargs) @@ -320,6 +372,15 @@ def _dnsmasq_cmd(net): return ''.join(cmd) +def _ra_cmd(net): + """Builds radvd command""" + cmd = ['sudo -E radvd', +# ' -u nobody', + ' -C %s' % _ra_file(net['bridge'], 'conf'), + ' -p %s' % _ra_file(net['bridge'], 'pid')] + return ''.join(cmd) + + def _stop_dnsmasq(network): """Stops the dnsmasq instance for a given network""" pid = _dnsmasq_pid_for(network) @@ -328,7 +389,7 @@ def _stop_dnsmasq(network): try: _execute('sudo kill -TERM %d' % pid) except Exception as exc: # pylint: disable-msg=W0703 - logging.debug(_("Killing dnsmasq threw %s"), exc) + LOG.debug(_("Killing dnsmasq threw %s"), exc) def _dhcp_file(bridge, kind): @@ -341,6 +402,16 @@ def _dhcp_file(bridge, kind): kind)) +def _ra_file(bridge, kind): + """Return path to a pid or conf file for a bridge""" + + if not os.path.exists(FLAGS.networks_path): + os.makedirs(FLAGS.networks_path) + return os.path.abspath("%s/nova-ra-%s.%s" % (FLAGS.networks_path, + bridge, + kind)) + + def _dnsmasq_pid_for(bridge): """Returns the pid for prior dnsmasq instance for a bridge @@ -354,3 +425,18 @@ def _dnsmasq_pid_for(bridge): if os.path.exists(pid_file): with open(pid_file, 'r') as f: return int(f.read()) + + +def _ra_pid_for(bridge): + """Returns the pid for prior radvd instance for a bridge + + Returns None if no pid file exists + + If machine has rebooted pid might be incorrect (caller should check) + """ + + pid_file = _ra_file(bridge, 'pid') + + if os.path.exists(pid_file): + with open(pid_file, 'r') as f: + return int(f.read()) diff --git a/nova/network/manager.py b/nova/network/manager.py index 16aa8f895951..61de8055abfe 100644 --- a/nova/network/manager.py +++ b/nova/network/manager.py @@ -45,7 +45,6 @@ topologies. 
All of the network commands are issued to a subclass of """ import datetime -import logging import math import socket @@ -55,11 +54,13 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import utils from nova import rpc +LOG = logging.getLogger("nova.network.manager") FLAGS = flags.FLAGS flags.DEFINE_string('flat_network_bridge', 'br100', 'Bridge for simple network instances') @@ -73,7 +74,7 @@ flags.DEFINE_string('flat_network_dhcp_start', '10.0.0.2', 'Dhcp start for FlatDhcp') flags.DEFINE_integer('vlan_start', 100, 'First VLAN for private networks') flags.DEFINE_integer('num_networks', 1000, 'Number of networks to support') -flags.DEFINE_string('vpn_ip', utils.get_my_ip(), +flags.DEFINE_string('vpn_ip', '$my_ip', 'Public IP for the cloudpipe VPN servers') flags.DEFINE_integer('vpn_start', 1000, 'First Vpn port for private networks') flags.DEFINE_integer('network_size', 256, @@ -81,6 +82,7 @@ flags.DEFINE_integer('network_size', 256, flags.DEFINE_string('floating_range', '4.4.4.0/24', 'Floating IP address block') flags.DEFINE_string('fixed_range', '10.0.0.0/8', 'Fixed IP address block') +flags.DEFINE_string('fixed_range_v6', 'fd00::/48', 'Fixed IPv6 address block') flags.DEFINE_integer('cnt_vpn_clients', 5, 'Number of addresses reserved for vpn clients') flags.DEFINE_string('network_driver', 'nova.network.linux_net', @@ -89,6 +91,9 @@ flags.DEFINE_bool('update_dhcp_on_disassociate', False, 'Whether to update dhcp when fixed_ip is disassociated') flags.DEFINE_integer('fixed_ip_disassociate_timeout', 600, 'Seconds after which a deallocated ip is disassociated') + +flags.DEFINE_bool('use_ipv6', False, + 'use ipv6') flags.DEFINE_string('network_host', socket.gethostname(), 'Network host to use for ip allocation in flat modes') flags.DEFINE_bool('fake_call', False, @@ -131,7 +136,7 @@ class NetworkManager(manager.Manager): def set_network_host(self, context, network_id): """Safely sets the host of the network.""" - logging.debug(_("setting network host")) + LOG.debug(_("setting network host"), context=context) host = self.db.network_set_host(context, network_id, self.host) @@ -186,7 +191,7 @@ class NetworkManager(manager.Manager): def lease_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is leased.""" - logging.debug("Leasing IP %s", address) + LOG.debug(_("Leasing IP %s"), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: @@ -201,12 +206,12 @@ class NetworkManager(manager.Manager): {'leased': True, 'updated_at': now}) if not fixed_ip_ref['allocated']: - logging.warn(_("IP %s leased that was already deallocated"), - address) + LOG.warn(_("IP %s leased that was already deallocated"), address, + context=context) def release_fixed_ip(self, context, mac, address): """Called by dhcp-bridge when ip is released.""" - logging.debug("Releasing IP %s", address) + LOG.debug(_("Releasing IP %s"), address, context=context) fixed_ip_ref = self.db.fixed_ip_get_by_address(context, address) instance_ref = fixed_ip_ref['instance'] if not instance_ref: @@ -216,7 +221,8 @@ class NetworkManager(manager.Manager): raise exception.Error(_("IP %s released from bad mac %s vs %s") % (address, instance_ref['mac_address'], mac)) if not fixed_ip_ref['leased']: - logging.warn(_("IP %s released that was not leased"), address) + LOG.warn(_("IP %s released that was not leased"), address,
+ context=context) self.db.fixed_ip_update(context, fixed_ip_ref['address'], {'leased': False}) @@ -233,8 +239,8 @@ class NetworkManager(manager.Manager): """Get the network host for the current context.""" raise NotImplementedError() - def create_networks(self, context, num_networks, network_size, - *args, **kwargs): + def create_networks(self, context, cidr, num_networks, network_size, + cidr_v6, *args, **kwargs): """Create networks based on parameters.""" raise NotImplementedError() @@ -319,9 +325,11 @@ class FlatManager(NetworkManager): pass def create_networks(self, context, cidr, num_networks, network_size, - *args, **kwargs): + cidr_v6, *args, **kwargs): """Create networks based on parameters.""" fixed_net = IPy.IP(cidr) + fixed_net_v6 = IPy.IP(cidr_v6) + significant_bits_v6 = 64 for index in range(num_networks): start = index * network_size significant_bits = 32 - int(math.log(network_size, 2)) @@ -334,7 +342,13 @@ class FlatManager(NetworkManager): net['gateway'] = str(project_net[1]) net['broadcast'] = str(project_net.broadcast()) net['dhcp_start'] = str(project_net[2]) + + if(FLAGS.use_ipv6): + cidr_v6 = "%s/%s" % (fixed_net_v6[0], significant_bits_v6) + net['cidr_v6'] = cidr_v6 + + network_ref = self.db.network_create_safe(context, net) + if network_ref: self._create_fixed_ips(context, network_ref['id']) @@ -437,7 +451,7 @@ class VlanManager(NetworkManager): self.host, time) if num: - logging.debug(_("Dissassociated %s stale fixed ip(s)"), num) + LOG.debug(_("Disassociated %s stale fixed ip(s)"), num) def init_host(self): """Do any initialization that needs to be run if this is a @@ -480,12 +494,16 @@ class VlanManager(NetworkManager): network_ref['bridge']) def create_networks(self, context, cidr, num_networks, network_size, - vlan_start, vpn_start): + vlan_start, vpn_start, cidr_v6): """Create networks based on parameters.""" fixed_net = IPy.IP(cidr) + fixed_net_v6 = IPy.IP(cidr_v6) + network_size_v6 = 1 << 64 + significant_bits_v6 = 64 for index in range(num_networks): vlan = vlan_start + index start = index * network_size + start_v6 = index * network_size_v6 significant_bits = 32 - int(math.log(network_size, 2)) cidr = "%s/%s" % (fixed_net[start], significant_bits) project_net = IPy.IP(cidr) @@ -498,6 +516,11 @@ class VlanManager(NetworkManager): net['dhcp_start'] = str(project_net[3]) net['vlan'] = vlan net['bridge'] = 'br%s' % vlan + if(FLAGS.use_ipv6): + cidr_v6 = "%s/%s" % (fixed_net_v6[start_v6], + significant_bits_v6) + net['cidr_v6'] = cidr_v6 + # NOTE(vish): This makes ports unique across the cloud, a more # robust solution would be to make them unique per ip net['vpn_public_port'] = vpn_start + index @@ -536,6 +559,7 @@ class VlanManager(NetworkManager): self.driver.ensure_vlan_bridge(network_ref['vlan'], network_ref['bridge'], network_ref) + # NOTE(vish): only ensure this forward if the address hasn't been set # manually.
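        # For illustration only (not in the patch): the v6 carving above
        # hands each project network one /64 out of fixed_range_v6 by
        # stepping the index 2**64 addresses at a time, e.g.:
        #
        #     >>> import IPy
        #     >>> fixed_net_v6 = IPy.IP('fd00::/48')
        #     >>> for index in range(3):
        #     ...     print "%s/64" % fixed_net_v6[index * (1 << 64)]
        #     fd00::/64
        #     fd00:0:0:1::/64
        #     fd00:0:0:2::/64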
if address == FLAGS.vpn_ip: @@ -544,6 +568,8 @@ class VlanManager(NetworkManager): network_ref['vpn_private_address']) if not FLAGS.fake_network: self.driver.update_dhcp(context, network_id) + if(FLAGS.use_ipv6): + self.driver.update_ra(context, network_id) @property def _bottom_reserved_ips(self): diff --git a/nova/objectstore/handler.py b/nova/objectstore/handler.py index 52257f69fce1..bc26fd3c551f 100644 --- a/nova/objectstore/handler.py +++ b/nova/objectstore/handler.py @@ -39,7 +39,6 @@ S3 client with this module:: import datetime import json -import logging import multiprocessing import os import urllib @@ -54,12 +53,14 @@ from twisted.web import static from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth import manager from nova.objectstore import bucket from nova.objectstore import image +LOG = logging.getLogger('nova.objectstore.handler') FLAGS = flags.FLAGS flags.DEFINE_string('s3_listen_host', '', 'Host to listen on.') @@ -132,9 +133,11 @@ def get_context(request): request.uri, headers=request.getAllHeaders(), check_type='s3') - return context.RequestContext(user, project) + rv = context.RequestContext(user, project) + LOG.audit(_("Authenticated request"), context=rv) + return rv except exception.Error as ex: - logging.debug(_("Authentication Failure: %s"), ex) + LOG.debug(_("Authentication Failure: %s"), ex) raise exception.NotAuthorized() @@ -176,7 +179,7 @@ class S3(ErrorHandlingResource): def render_GET(self, request): # pylint: disable-msg=R0201 """Renders the GET request for a list of buckets as XML""" - logging.debug('List of buckets requested') + LOG.debug(_('List of buckets requested'), context=request.context) buckets = [b for b in bucket.Bucket.all() \ if b.is_authorized(request.context)] @@ -203,7 +206,7 @@ class BucketResource(ErrorHandlingResource): def render_GET(self, request): "Returns the keys for the bucket resource""" - logging.debug("List keys for bucket %s", self.name) + LOG.debug(_("List keys for bucket %s"), self.name) try: bucket_object = bucket.Bucket(self.name) @@ -211,6 +214,8 @@ class BucketResource(ErrorHandlingResource): return error.NoResource(message="No such bucket").render(request) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to access bucket %s"), + self.name, context=request.context) raise exception.NotAuthorized() prefix = get_argument(request, "prefix", u"") @@ -227,8 +232,8 @@ class BucketResource(ErrorHandlingResource): def render_PUT(self, request): "Creates the bucket resource""" - logging.debug(_("Creating bucket %s"), self.name) - logging.debug("calling bucket.Bucket.create(%r, %r)", + LOG.debug(_("Creating bucket %s"), self.name) + LOG.debug("calling bucket.Bucket.create(%r, %r)", self.name, request.context) bucket.Bucket.create(self.name, request.context) @@ -237,10 +242,12 @@ class BucketResource(ErrorHandlingResource): def render_DELETE(self, request): """Deletes the bucket resource""" - logging.debug(_("Deleting bucket %s"), self.name) + LOG.debug(_("Deleting bucket %s"), self.name) bucket_object = bucket.Bucket(self.name) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to delete bucket %s"), + self.name, context=request.context) raise exception.NotAuthorized() bucket_object.delete() @@ -261,11 +268,12 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. 
""" - logging.debug(_("Getting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Getting object: %s / %s"), self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to get object %s from bucket " + "%s"), self.name, self.bucket.name, + context=request.context) raise exception.NotAuthorized() obj = self.bucket[urllib.unquote(self.name)] @@ -281,11 +289,12 @@ class ObjectResource(ErrorHandlingResource): Raises NotAuthorized if user in request context is not authorized to delete the object. """ - logging.debug(_("Putting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Putting object: %s / %s"), self.bucket.name, self.name) if not self.bucket.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to upload object %s to bucket " + "%s"), + self.name, self.bucket.name, context=request.context) raise exception.NotAuthorized() key = urllib.unquote(self.name) @@ -302,11 +311,13 @@ class ObjectResource(ErrorHandlingResource): authorized to delete the object. """ - logging.debug(_("Deleting object: %s / %s"), - self.bucket.name, - self.name) + LOG.debug(_("Deleting object: %s / %s"), self.bucket.name, self.name, + context=request.context) if not self.bucket.is_authorized(request.context): + LOG.audit("Unauthorized attempt to delete object %s from " + "bucket %s", self.name, self.bucket.name, + context=request.context) raise exception.NotAuthorized() del self.bucket[urllib.unquote(self.name)] @@ -379,13 +390,21 @@ class ImagesResource(resource.Resource): image_path = os.path.join(FLAGS.images_path, image_id) if not image_path.startswith(FLAGS.images_path) or \ os.path.exists(image_path): + LOG.audit(_("Not authorized to upload image: invalid directory " + "%s"), + image_path, context=request.context) raise exception.NotAuthorized() bucket_object = bucket.Bucket(image_location.split("/")[0]) if not bucket_object.is_authorized(request.context): + LOG.audit(_("Not authorized to upload image: unauthorized " + "bucket %s"), bucket_object.name, + context=request.context) raise exception.NotAuthorized() + LOG.audit(_("Starting image upload: %s"), image_id, + context=request.context) p = multiprocessing.Process(target=image.Image.register_aws_image, args=(image_id, image_location, request.context)) p.start() @@ -398,17 +417,21 @@ class ImagesResource(resource.Resource): image_id = get_argument(request, 'image_id', u'') image_object = image.Image(image_id) if not image_object.is_authorized(request.context): - logging.debug(_("not authorized for render_POST in images")) + LOG.audit(_("Not authorized to update attributes of image %s"), + image_id, context=request.context) raise exception.NotAuthorized() operation = get_argument(request, 'operation', u'') if operation: # operation implies publicity toggle - logging.debug(_("handling publicity toggle")) - image_object.set_public(operation == 'add') + newstatus = (operation == 'add') + LOG.audit(_("Toggling publicity flag of image %s %r"), image_id, + newstatus, context=request.context) + image_object.set_public(newstatus) else: # other attributes imply update - logging.debug(_("update user fields")) + LOG.audit(_("Updating user fields on image %s"), image_id, + context=request.context) clean_args = {} for arg in request.args.keys(): clean_args[arg] = request.args[arg][0] @@ -421,9 +444,12 @@ class ImagesResource(resource.Resource): image_object = image.Image(image_id) if not image_object.is_authorized(request.context): + LOG.audit(_("Unauthorized attempt to 
delete image %s"), + image_id, context=request.context) raise exception.NotAuthorized() image_object.delete() + LOG.audit(_("Deleted image: %s"), image_id, context=request.context) request.setResponseCode(204) return '' diff --git a/nova/rpc.py b/nova/rpc.py index 844088348633..49b11602bdeb 100644 --- a/nova/rpc.py +++ b/nova/rpc.py @@ -22,7 +22,6 @@ No fan-out support yet. """ import json -import logging import sys import time import traceback @@ -36,13 +35,12 @@ from nova import context from nova import exception from nova import fakerabbit from nova import flags +from nova import log as logging from nova import utils FLAGS = flags.FLAGS - -LOG = logging.getLogger('amqplib') -LOG.setLevel(logging.DEBUG) +LOG = logging.getLogger('nova.rpc') class Connection(carrot_connection.BrokerConnection): @@ -91,15 +89,16 @@ class Consumer(messaging.Consumer): self.failed_connection = False break except: # Catching all because carrot sucks - logging.exception(_("AMQP server on %s:%d is unreachable." - " Trying again in %d seconds.") % ( - FLAGS.rabbit_host, - FLAGS.rabbit_port, - FLAGS.rabbit_retry_interval)) + LOG.exception(_("AMQP server on %s:%d is unreachable." + " Trying again in %d seconds.") % ( + FLAGS.rabbit_host, + FLAGS.rabbit_port, + FLAGS.rabbit_retry_interval)) self.failed_connection = True if self.failed_connection: - logging.exception(_("Unable to connect to AMQP server" - " after %d tries. Shutting down.") % FLAGS.rabbit_max_retries) + LOG.exception(_("Unable to connect to AMQP server " + "after %d tries. Shutting down."), + FLAGS.rabbit_max_retries) sys.exit(1) def fetch(self, no_ack=None, auto_ack=None, enable_callbacks=False): @@ -116,14 +115,14 @@ class Consumer(messaging.Consumer): self.declare() super(Consumer, self).fetch(no_ack, auto_ack, enable_callbacks) if self.failed_connection: - logging.error(_("Reconnected to queue")) + LOG.error(_("Reconnected to queue")) self.failed_connection = False # NOTE(vish): This is catching all errors because we really don't # exceptions to be logged 10 times a second if some # persistent failure occurs. 
except Exception: # pylint: disable-msg=W0703 if not self.failed_connection: - logging.exception(_("Failed to fetch message from queue")) + LOG.exception(_("Failed to fetch message from queue")) self.failed_connection = True def attach_to_eventlet(self): @@ -193,6 +192,7 @@ class AdapterConsumer(TopicConsumer): if msg_id: msg_reply(msg_id, rval, None) except Exception as e: + logging.exception("Exception during message handling") if msg_id: msg_reply(msg_id, None, sys.exc_info()) return @@ -242,8 +242,8 @@ def msg_reply(msg_id, reply=None, failure=None): if failure: message = str(failure[1]) tb = traceback.format_exception(*failure) - logging.error(_("Returning exception %s to caller"), message) - logging.error(tb) + LOG.error(_("Returning exception %s to caller"), message) + LOG.error(tb) failure = (failure[0].__name__, str(failure[1]), tb) conn = Connection.instance(True) publisher = DirectPublisher(connection=conn, msg_id=msg_id) diff --git a/nova/scheduler/manager.py b/nova/scheduler/manager.py index 44e21f2fdda7..a4d6dd574e8a 100644 --- a/nova/scheduler/manager.py +++ b/nova/scheduler/manager.py @@ -21,15 +21,16 @@ Scheduler Service """ -import logging import functools from nova import db from nova import flags +from nova import log as logging from nova import manager from nova import rpc from nova import utils +LOG = logging.getLogger('nova.scheduler.manager') FLAGS = flags.FLAGS flags.DEFINE_string('scheduler_driver', 'nova.scheduler.chance.ChanceScheduler', @@ -65,4 +66,4 @@ class SchedulerManager(manager.Manager): db.queue_get_for(context, topic, host), {"method": method, "args": kwargs}) - logging.debug(_("Casting to %s %s for %s"), topic, host, method) + LOG.debug(_("Casting to %s %s for %s"), topic, host, method) diff --git a/nova/scheduler/zone.py b/nova/scheduler/zone.py new file mode 100644 index 000000000000..49786cd3236c --- /dev/null +++ b/nova/scheduler/zone.py @@ -0,0 +1,56 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Openstack, LLC. +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Availability Zone Scheduler implementation +""" + +import random + +from nova.scheduler import driver +from nova import db + + +class ZoneScheduler(driver.Scheduler): + """Implements Scheduler as a random node selector.""" + + def hosts_up_with_zone(self, context, topic, zone): + """Return the list of hosts that have a running service + for topic and availability zone (if defined). + """ + + if zone is None: + return self.hosts_up(context, topic) + + services = db.service_get_all_by_topic(context, topic) + return [service.host + for service in services + if self.service_is_up(service) + and service.availability_zone == zone] + + def schedule(self, context, topic, *_args, **_kwargs): + """Picks a host that is up at random in selected + availability zone (if defined). 
+ """ + + zone = _kwargs.get('availability_zone') + hosts = self.hosts_up_with_zone(context, topic, zone) + if not hosts: + raise driver.NoValidHost(_("No hosts found")) + return hosts[int(random.random() * len(hosts))] diff --git a/nova/service.py b/nova/service.py index 7203430c6f82..efc08fd6344c 100644 --- a/nova/service.py +++ b/nova/service.py @@ -21,7 +21,6 @@ Generic Node baseclass for all workers that run on hosts """ import inspect -import logging import os import sys import time @@ -35,10 +34,10 @@ from sqlalchemy.exc import OperationalError from nova import context from nova import db from nova import exception +from nova import log as logging from nova import flags from nova import rpc from nova import utils -from nova.db.sqlalchemy import models FLAGS = flags.FLAGS @@ -114,11 +113,13 @@ class Service(object): self.timers.append(periodic) def _create_service_ref(self, context): + zone = FLAGS.node_availability_zone service_ref = db.service_create(context, {'host': self.host, 'binary': self.binary, 'topic': self.topic, - 'report_count': 0}) + 'report_count': 0, + 'availability_zone': zone}) self.service_id = service_ref['id'] def __getattr__(self, key): @@ -155,7 +156,7 @@ class Service(object): report_interval = FLAGS.report_interval if not periodic_interval: periodic_interval = FLAGS.periodic_interval - logging.warn(_("Starting %s node"), topic) + logging.audit(_("Starting %s node"), topic) service_obj = cls(host, binary, topic, manager, report_interval, periodic_interval) @@ -208,30 +209,16 @@ class Service(object): self.model_disconnected = True logging.exception(_("model server went away")) - try: - models.register_models() - except OperationalError: - logging.exception(_("Data store is unreachable." - " Trying again in %d seconds.") % - FLAGS.sql_retry_interval) - time.sleep(FLAGS.sql_retry_interval) - def serve(*services): - argv = FLAGS(sys.argv) + FLAGS(sys.argv) + logging.basicConfig() if not services: services = [Service.create()] name = '_'.join(x.binary for x in services) - logging.debug("Serving %s" % name) - - logging.getLogger('amqplib').setLevel(logging.WARN) - - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - else: - logging.getLogger().setLevel(logging.WARNING) + logging.debug(_("Serving %s"), name) logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: diff --git a/nova/test.py b/nova/test.py index db5826c04640..881baccd5c1f 100644 --- a/nova/test.py +++ b/nova/test.py @@ -23,14 +23,10 @@ and some black magic for inline callbacks. """ import datetime -import sys -import time import unittest import mox import stubout -from twisted.internet import defer -from twisted.trial import unittest as trial_unittest from nova import context from nova import db @@ -74,7 +70,8 @@ class TestCase(unittest.TestCase): FLAGS.fixed_range, 5, 16, FLAGS.vlan_start, - FLAGS.vpn_start) + FLAGS.vpn_start, + FLAGS.fixed_range_v6) # emulate some of the mox stuff, we can't use the metaclass # because it screws with our generators @@ -139,95 +136,3 @@ class TestCase(unittest.TestCase): _wrapped.func_name = self.originalAttach.func_name rpc.Consumer.attach_to_eventlet = _wrapped - - -class TrialTestCase(trial_unittest.TestCase): - """Test case base class for all unit tests""" - def setUp(self): - """Run before each test method to initialize test environment""" - super(TrialTestCase, self).setUp() - # NOTE(vish): We need a better method for creating fixtures for tests - # now that we have some required db setup for the system - # to work properly. 
- self.start = datetime.datetime.utcnow() - ctxt = context.get_admin_context() - if db.network_count(ctxt) != 5: - network_manager.VlanManager().create_networks(ctxt, - FLAGS.fixed_range, - 5, 16, - FLAGS.vlan_start, - FLAGS.vpn_start) - - # emulate some of the mox stuff, we can't use the metaclass - # because it screws with our generators - self.mox = mox.Mox() - self.stubs = stubout.StubOutForTesting() - self.flag_overrides = {} - self.injected = [] - self._original_flags = FLAGS.FlagValuesDict() - - def tearDown(self): - """Runs after each test method to finalize/tear down test - environment.""" - try: - self.mox.UnsetStubs() - self.stubs.UnsetAll() - self.stubs.SmartUnsetAll() - self.mox.VerifyAll() - # NOTE(vish): Clean up any ips associated during the test. - ctxt = context.get_admin_context() - db.fixed_ip_disassociate_all_by_timeout(ctxt, FLAGS.host, - self.start) - db.network_disassociate_all(ctxt) - for x in self.injected: - try: - x.stop() - except AssertionError: - pass - - if FLAGS.fake_rabbit: - fakerabbit.reset_all() - - db.security_group_destroy_all(ctxt) - super(TrialTestCase, self).tearDown() - finally: - self.reset_flags() - - def flags(self, **kw): - """Override flag variables for a test""" - for k, v in kw.iteritems(): - if k in self.flag_overrides: - self.reset_flags() - raise Exception( - 'trying to override already overriden flag: %s' % k) - self.flag_overrides[k] = getattr(FLAGS, k) - setattr(FLAGS, k, v) - - def reset_flags(self): - """Resets all flag variables for the test. Runs after each test""" - FLAGS.Reset() - for k, v in self._original_flags.iteritems(): - setattr(FLAGS, k, v) - - def run(self, result=None): - test_method = getattr(self, self._testMethodName) - setattr(self, - self._testMethodName, - self._maybeInlineCallbacks(test_method, result)) - rv = super(TrialTestCase, self).run(result) - setattr(self, self._testMethodName, test_method) - return rv - - def _maybeInlineCallbacks(self, func, result): - def _wrapped(): - g = func() - if isinstance(g, defer.Deferred): - return g - if not hasattr(g, 'send'): - return defer.succeed(g) - - inlined = defer.inlineCallbacks(func) - d = inlined() - return d - _wrapped.func_name = func.func_name - return _wrapped diff --git a/nova/tests/__init__.py b/nova/tests/__init__.py index 8dc87d0e2df8..592d5bea9de1 100644 --- a/nova/tests/__init__.py +++ b/nova/tests/__init__.py @@ -34,3 +34,8 @@ # The code below enables nosetests to work with i18n _() blocks import __builtin__ setattr(__builtin__, '_', lambda x: x) + + +def setup(): + from nova.db import migration + migration.db_sync() diff --git a/nova/tests/api/openstack/__init__.py b/nova/tests/api/openstack/__init__.py index 9e183bd0d458..14eaaa62c713 100644 --- a/nova/tests/api/openstack/__init__.py +++ b/nova/tests/api/openstack/__init__.py @@ -15,23 +15,28 @@ # License for the specific language governing permissions and limitations # under the License. 
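The tests/__init__.py change above gives nose a package-level fixture: nose imports the nova.tests package, finds a module-level callable named setup(), and runs it once before the first test, so migration.db_sync() has already built the schema by the time any TestCase touches the database. That one-sync-per-run design is presumably also what lets the models.register_models() fallback come out of nova/service.py earlier in this patch. A hand-run equivalent, for illustration only:

    import nova.tests

    nova.tests.setup()    # what nose does once per run: migration.db_sync()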
+import webob.dec import unittest from nova import context from nova import flags from nova.api.openstack.ratelimiting import RateLimitingMiddleware from nova.api.openstack.common import limited -from nova.tests.api.fakes import APIStub -from nova import utils +from nova.tests.api.openstack import fakes from webob import Request FLAGS = flags.FLAGS +@webob.dec.wsgify +def simple_wsgi(req): + return "" + + class RateLimitingMiddlewareTest(unittest.TestCase): def test_get_action_name(self): - middleware = RateLimitingMiddleware(APIStub()) + middleware = RateLimitingMiddleware(simple_wsgi) def verify(method, url, action_name): req = Request.blank(url) @@ -61,19 +66,19 @@ class RateLimitingMiddlewareTest(unittest.TestCase): self.assertTrue('Retry-After' in resp.headers) def test_single_action(self): - middleware = RateLimitingMiddleware(APIStub()) + middleware = RateLimitingMiddleware(simple_wsgi) self.exhaust(middleware, 'DELETE', '/servers/4', 'usr1', 100) self.exhaust(middleware, 'DELETE', '/servers/4', 'usr2', 100) def test_POST_servers_action_implies_POST_action(self): - middleware = RateLimitingMiddleware(APIStub()) + middleware = RateLimitingMiddleware(simple_wsgi) self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 10) self.exhaust(middleware, 'POST', '/images/4', 'usr2', 10) self.assertTrue(set(middleware.limiter._levels) == \ set(['usr1:POST', 'usr1:POST servers', 'usr2:POST'])) def test_POST_servers_action_correctly_ratelimited(self): - middleware = RateLimitingMiddleware(APIStub()) + middleware = RateLimitingMiddleware(simple_wsgi) # Use up all of our "POST" allowance for the minute, 5 times for i in range(5): self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 10) @@ -83,9 +88,9 @@ class RateLimitingMiddlewareTest(unittest.TestCase): self.exhaust(middleware, 'POST', '/servers/4', 'usr1', 0) def test_proxy_ctor_works(self): - middleware = RateLimitingMiddleware(APIStub()) + middleware = RateLimitingMiddleware(simple_wsgi) self.assertEqual(middleware.limiter.__class__.__name__, "Limiter") - middleware = RateLimitingMiddleware(APIStub(), service_host='foobar') + middleware = RateLimitingMiddleware(simple_wsgi, service_host='foobar') self.assertEqual(middleware.limiter.__class__.__name__, "WSGIAppProxy") diff --git a/nova/tests/api/openstack/fakes.py b/nova/tests/api/openstack/fakes.py index 291a0e46819d..fb282f1c9734 100644 --- a/nova/tests/api/openstack/fakes.py +++ b/nova/tests/api/openstack/fakes.py @@ -22,6 +22,9 @@ import string import webob import webob.dec +from paste import urlmap + +from glance import client as glance_client from nova import auth from nova import context @@ -29,6 +32,7 @@ from nova import exception as exc from nova import flags from nova import utils import nova.api.openstack.auth +from nova.api import openstack from nova.api.openstack import auth from nova.api.openstack import ratelimiting from nova.image import glance @@ -69,6 +73,17 @@ def fake_wsgi(self, req): return self.application +def wsgi_app(inner_application=None): + if not inner_application: + inner_application = openstack.APIRouter() + mapper = urlmap.URLMap() + api = openstack.FaultWrapper(auth.AuthMiddleware( + ratelimiting.RateLimitingMiddleware(inner_application))) + mapper['/v1.0'] = api + mapper['/'] = openstack.FaultWrapper(openstack.Versions()) + return mapper + + def stub_out_key_pair_funcs(stubs): def key_pair(context, user_id): return [dict(name='key', public_key='public_key')] @@ -107,7 +122,7 @@ def stub_out_rate_limiting(stubs): def stub_out_networking(stubs): def 
get_my_ip(): return '127.0.0.1' - stubs.Set(nova.utils, 'get_my_ip', get_my_ip) + stubs.Set(nova.flags, '_get_my_ip', get_my_ip) def stub_out_compute_api_snapshot(stubs): @@ -116,64 +131,60 @@ def stub_out_compute_api_snapshot(stubs): stubs.Set(nova.compute.API, 'snapshot', snapshot) -def stub_out_glance(stubs, initial_fixtures=[]): +def stub_out_glance(stubs, initial_fixtures=None): - class FakeParallaxClient: + class FakeGlanceClient: def __init__(self, initial_fixtures): - self.fixtures = initial_fixtures + self.fixtures = initial_fixtures or [] - def fake_get_image_index(self): + def fake_get_images(self): return [dict(id=f['id'], name=f['name']) for f in self.fixtures] - def fake_get_image_details(self): + def fake_get_images_detailed(self): return self.fixtures - def fake_get_image_metadata(self, image_id): + def fake_get_image_meta(self, image_id): for f in self.fixtures: if f['id'] == image_id: return f return None - def fake_add_image_metadata(self, image_data): + def fake_add_image(self, image_meta): id = ''.join(random.choice(string.letters) for _ in range(20)) - image_data['id'] = id - self.fixtures.append(image_data) + image_meta['id'] = id + self.fixtures.append(image_meta) return id - def fake_update_image_metadata(self, image_id, image_data): - f = self.fake_get_image_metadata(image_id) + def fake_update_image(self, image_id, image_meta): + f = self.fake_get_image_meta(image_id) if not f: raise exc.NotFound - f.update(image_data) + f.update(image_meta) - def fake_delete_image_metadata(self, image_id): - f = self.fake_get_image_metadata(image_id) + def fake_delete_image(self, image_id): + f = self.fake_get_image_meta(image_id) if not f: raise exc.NotFound self.fixtures.remove(f) - def fake_delete_all(self): - self.fixtures = [] + ##def fake_delete_all(self): + ## self.fixtures = [] - fake_parallax_client = FakeParallaxClient(initial_fixtures) - stubs.Set(nova.image.glance.ParallaxClient, 'get_image_index', - fake_parallax_client.fake_get_image_index) - stubs.Set(nova.image.glance.ParallaxClient, 'get_image_details', - fake_parallax_client.fake_get_image_details) - stubs.Set(nova.image.glance.ParallaxClient, 'get_image_metadata', - fake_parallax_client.fake_get_image_metadata) - stubs.Set(nova.image.glance.ParallaxClient, 'add_image_metadata', - fake_parallax_client.fake_add_image_metadata) - stubs.Set(nova.image.glance.ParallaxClient, 'update_image_metadata', - fake_parallax_client.fake_update_image_metadata) - stubs.Set(nova.image.glance.ParallaxClient, 'delete_image_metadata', - fake_parallax_client.fake_delete_image_metadata) - stubs.Set(nova.image.glance.GlanceImageService, 'delete_all', - fake_parallax_client.fake_delete_all) + GlanceClient = glance_client.Client + fake = FakeGlanceClient(initial_fixtures) + + stubs.Set(GlanceClient, 'get_images', fake.fake_get_images) + stubs.Set(GlanceClient, 'get_images_detailed', + fake.fake_get_images_detailed) + stubs.Set(GlanceClient, 'get_image_meta', fake.fake_get_image_meta) + stubs.Set(GlanceClient, 'add_image', fake.fake_add_image) + stubs.Set(GlanceClient, 'update_image', fake.fake_update_image) + stubs.Set(GlanceClient, 'delete_image', fake.fake_delete_image) + #stubs.Set(GlanceClient, 'delete_all', fake.fake_delete_all) class FakeToken(object): diff --git a/nova/tests/api/openstack/test_adminapi.py b/nova/tests/api/openstack/test_adminapi.py index 1b2e1654d5d1..73120c31ddb7 100644 --- a/nova/tests/api/openstack/test_adminapi.py +++ b/nova/tests/api/openstack/test_adminapi.py @@ -19,15 +19,19 @@ import unittest import 
stubout import webob +from paste import urlmap -import nova.api from nova import flags +from nova.api import openstack +from nova.api.openstack import ratelimiting +from nova.api.openstack import auth from nova.tests.api.openstack import fakes FLAGS = flags.FLAGS class AdminAPITest(unittest.TestCase): + def setUp(self): self.stubs = stubout.StubOutForTesting() fakes.FakeAuthManager.auth_data = {} @@ -45,7 +49,7 @@ class AdminAPITest(unittest.TestCase): FLAGS.allow_admin_api = True # We should still be able to access public operations. req = webob.Request.blank('/v1.0/flavors') - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) # TODO: Confirm admin operations are available. @@ -53,7 +57,7 @@ class AdminAPITest(unittest.TestCase): FLAGS.allow_admin_api = False # We should still be able to access public operations. req = webob.Request.blank('/v1.0/flavors') - res = req.get_response(nova.api.API('os')) + res = req.get_response(fakes.wsgi_app()) self.assertEqual(res.status_int, 200) # TODO: Confirm admin operations are unavailable. diff --git a/nova/tests/api/openstack/test_api.py b/nova/tests/api/openstack/test_api.py index d8b202e21226..db0fe1060b17 100644 --- a/nova/tests/api/openstack/test_api.py +++ b/nova/tests/api/openstack/test_api.py @@ -19,14 +19,18 @@ import unittest import webob.exc import webob.dec -import nova.api.openstack -from nova.api.openstack import API -from nova.api.openstack import faults from webob import Request +from nova.api import openstack +from nova.api.openstack import faults + class APITest(unittest.TestCase): + def _wsgi_app(self, inner_app): + # simpler version of the app than fakes.wsgi_app + return openstack.FaultWrapper(inner_app) + def test_exceptions_are_converted_to_faults(self): @webob.dec.wsgify @@ -46,29 +50,32 @@ class APITest(unittest.TestCase): exc = webob.exc.HTTPNotFound(explanation='Raised a webob.exc') return faults.Fault(exc) - api = API() - - api.application = succeed + #api.application = succeed + api = self._wsgi_app(succeed) resp = Request.blank('/').get_response(api) self.assertFalse('computeFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 200, resp.body) - api.application = raise_webob_exc + #api.application = raise_webob_exc + api = self._wsgi_app(raise_webob_exc) resp = Request.blank('/').get_response(api) self.assertFalse('computeFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 404, resp.body) - api.application = raise_api_fault + #api.application = raise_api_fault + api = self._wsgi_app(raise_api_fault) resp = Request.blank('/').get_response(api) self.assertTrue('itemNotFound' in resp.body, resp.body) self.assertEqual(resp.status_int, 404, resp.body) - api.application = fail + #api.application = fail + api = self._wsgi_app(fail) resp = Request.blank('/').get_response(api) self.assertTrue('{"computeFault' in resp.body, resp.body) self.assertEqual(resp.status_int, 500, resp.body) - api.application = fail + #api.application = fail + api = self._wsgi_app(fail) resp = Request.blank('/.xml').get_response(api) self.assertTrue('%s' % vdi_rec['uuid'] def VM_start(self, _1, ref, _2, _3): vm = fake.get_record('VM', ref) @@ -135,10 +177,6 @@ class FakeSessionForVolumeTests(fake.SessionBase): def __init__(self, uri): super(FakeSessionForVolumeTests, self).__init__(uri) - def VBD_plug(self, _1, ref): - rec = fake.get_record('VBD', ref) - rec['currently-attached'] = True - def VDI_introduce(self, _1, uuid, _2, _3, _4, _5, _6, _7, _8, _9, 
_10, _11): valid_vdi = False diff --git a/nova/twistd.py b/nova/twistd.py index 29be9c4e1457..5562719997fa 100644 --- a/nova/twistd.py +++ b/nova/twistd.py @@ -22,7 +22,6 @@ manage pid files and support syslogging. """ import gflags -import logging import os import signal import sys @@ -34,6 +33,7 @@ from twisted.python import runtime from twisted.python import usage from nova import flags +from nova import log as logging if runtime.platformType == "win32": @@ -234,22 +234,12 @@ def serve(filename): OptionsClass = WrapTwistedOptions(TwistdServerOptions) options = OptionsClass() argv = options.parseOptions() - logging.getLogger('amqplib').setLevel(logging.WARN) FLAGS.python = filename FLAGS.no_save = True if not FLAGS.pidfile: FLAGS.pidfile = '%s.pid' % name elif FLAGS.pidfile.endswith('twistd.pid'): FLAGS.pidfile = FLAGS.pidfile.replace('twistd.pid', '%s.pid' % name) - # NOTE(vish): if we're running nodaemon, redirect the log to stdout - if FLAGS.nodaemon and not FLAGS.logfile: - FLAGS.logfile = "-" - if not FLAGS.logfile: - FLAGS.logfile = '%s.log' % name - elif FLAGS.logfile.endswith('twistd.log'): - FLAGS.logfile = FLAGS.logfile.replace('twistd.log', '%s.log' % name) - if FLAGS.logdir: - FLAGS.logfile = os.path.join(FLAGS.logdir, FLAGS.logfile) if not FLAGS.prefix: FLAGS.prefix = name elif FLAGS.prefix.endswith('twisted'): @@ -270,19 +260,10 @@ def serve(filename): print 'usage: %s [options] [start|stop|restart]' % argv[0] sys.exit(1) - formatter = logging.Formatter( - '(%(name)s): %(levelname)s %(message)s') - handler = logging.StreamHandler(log.StdioOnnaStick()) - handler.setFormatter(formatter) - logging.getLogger().addHandler(handler) - - if FLAGS.verbose: - logging.getLogger().setLevel(logging.DEBUG) - else: - logging.getLogger().setLevel(logging.WARNING) - + logging.basicConfig() logging.debug(_("Full set of FLAGS:")) for flag in FLAGS: logging.debug("%s : %s" % (flag, FLAGS.get(flag, None))) + logging.audit(_("Starting %s"), name) twistd.runApp(options) diff --git a/nova/utils.py b/nova/utils.py index 15112faa21bd..6d3ddd092a2c 100644 --- a/nova/utils.py +++ b/nova/utils.py @@ -22,7 +22,7 @@ System-level utilities and helper functions. 
import datetime import inspect -import logging +import json import os import random import subprocess @@ -31,14 +31,18 @@ import struct import sys import time from xml.sax import saxutils +import re +import netaddr from eventlet import event from eventlet import greenthread from nova import exception from nova.exception import ProcessExecutionError +from nova import log as logging +LOG = logging.getLogger("nova.utils") TIME_FORMAT = "%Y-%m-%dT%H:%M:%SZ" @@ -109,7 +113,7 @@ def vpn_ping(address, port, timeout=0.05, session_id=None): def fetchfile(url, target): - logging.debug(_("Fetching %s") % url) + LOG.debug(_("Fetching %s") % url) # c = pycurl.Curl() # fp = open(target, "wb") # c.setopt(c.URL, url) @@ -121,7 +125,7 @@ def fetchfile(url, target): def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): - logging.debug(_("Running cmd (subprocess): %s"), cmd) + LOG.debug(_("Running cmd (subprocess): %s"), cmd) env = os.environ.copy() if addl_env: env.update(addl_env) @@ -134,7 +138,7 @@ def execute(cmd, process_input=None, addl_env=None, check_exit_code=True): result = obj.communicate() obj.stdin.close() if obj.returncode: - logging.debug(_("Result was %s") % (obj.returncode)) + LOG.debug(_("Result was %s") % (obj.returncode)) if check_exit_code and obj.returncode != 0: (stdout, stderr) = result raise ProcessExecutionError(exit_code=obj.returncode, @@ -152,6 +156,11 @@ def abspath(s): return os.path.join(os.path.dirname(__file__), s) +def novadir(): + import nova + return os.path.abspath(nova.__file__).split('nova/__init__.pyc')[0] + + def default_flagfile(filename='nova.conf'): for arg in sys.argv: if arg.find('flagfile') != -1: @@ -167,12 +176,12 @@ def default_flagfile(filename='nova.conf'): def debug(arg): - logging.debug('debug in callback: %s', arg) + LOG.debug(_('debug in callback: %s'), arg) return arg def runthis(prompt, cmd, check_exit_code=True): - logging.debug(_("Running %s") % (cmd)) + LOG.debug(_("Running %s"), (cmd)) rv, err = execute(cmd, check_exit_code=check_exit_code) @@ -194,17 +203,38 @@ def last_octet(address): return int(address.split(".")[-1]) -def get_my_ip(): - """Returns the actual ip of the local machine.""" +def get_my_linklocal(interface): try: - csock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) - csock.connect(('8.8.8.8', 80)) - (addr, port) = csock.getsockname() - csock.close() - return addr - except socket.gaierror as ex: - logging.warn(_("Couldn't get IP, using 127.0.0.1 %s"), ex) - return "127.0.0.1" + if_str = execute("ip -f inet6 -o addr show %s" % interface) + condition = "\s+inet6\s+([0-9a-f:]+/\d+)\s+scope\s+link" + links = [re.search(condition, x) for x in if_str[0].split('\n')] + address = [w.group(1) for w in links if w is not None] + if address[0] is not None: + return address[0] + else: + return 'fe00::' + except IndexError as ex: + LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex) + except ProcessExecutionError as ex: + LOG.warn(_("Couldn't get Link Local IP of %s :%s"), interface, ex) + except: + return 'fe00::' + + +def to_global_ipv6(prefix, mac): + mac64 = netaddr.EUI(mac).eui64().words + int_addr = int(''.join(['%02x' % i for i in mac64]), 16) + mac64_addr = netaddr.IPAddress(int_addr) + maskIP = netaddr.IPNetwork(prefix).ip + return (mac64_addr ^ netaddr.IPAddress('::0200:0:0:0') | maskIP).format() + + +def to_mac(ipv6_address): + address = netaddr.IPAddress(ipv6_address) + mask1 = netaddr.IPAddress("::ffff:ffff:ffff:ffff") + mask2 = netaddr.IPAddress("::0200:0:0:0") + mac64 = 
netaddr.EUI(int(address & mask1 ^ mask2)).words + return ":".join(["%02x" % i for i in mac64[0:3] + mac64[5:8]]) def utcnow(): @@ -296,7 +326,7 @@ class LazyPluggable(object): fromlist = backend self.__backend = __import__(name, None, None, fromlist) - logging.info('backend %s', self.__backend) + LOG.debug(_('backend %s'), self.__backend) return self.__backend def __getattr__(self, key): @@ -304,6 +334,20 @@ class LazyPluggable(object): return getattr(backend, key) +class LoopingCallDone(Exception): + """The poll-function passed to LoopingCall can raise this exception to + break out of the loop normally. This is somewhat analogous to + StopIteration. + + An optional return-value can be included as the argument to the exception; + this return-value will be returned by LoopingCall.wait() + """ + + def __init__(self, retvalue=True): + """:param retvalue: Value that LoopingCall.wait() should return""" + self.retvalue = retvalue + + class LoopingCall(object): def __init__(self, f=None, *args, **kw): self.args = args @@ -322,12 +366,15 @@ class LoopingCall(object): while self._running: self.f(*self.args, **self.kw) greenthread.sleep(interval) + except LoopingCallDone, e: + self.stop() + done.send(e.retvalue) except Exception: logging.exception('in looping call') done.send_exception(*sys.exc_info()) return - - done.send(True) + else: + done.send(True) self.done = done @@ -362,3 +409,36 @@ def utf8(value): return value.encode("utf-8") assert isinstance(value, str) return value + + +def to_primitive(value): + if type(value) is type([]) or type(value) is type((None,)): + o = [] + for v in value: + o.append(to_primitive(v)) + return o + elif type(value) is type({}): + o = {} + for k, v in value.iteritems(): + o[k] = to_primitive(v) + return o + elif isinstance(value, datetime.datetime): + return str(value) + elif hasattr(value, 'iteritems'): + return to_primitive(dict(value.iteritems())) + elif hasattr(value, '__iter__'): + return to_primitive(list(value)) + else: + return value + + +def dumps(value): + try: + return json.dumps(value) + except TypeError: + pass + return json.dumps(to_primitive(value)) + + +def loads(s): + return json.loads(s) diff --git a/nova/version.py b/nova/version.py new file mode 100644 index 000000000000..7b27acb6a554 --- /dev/null +++ b/nova/version.py @@ -0,0 +1,46 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2011 OpenStack LLC +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
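The to_global_ipv6() and to_mac() helpers added to nova/utils.py above are plain EUI-64 arithmetic: flip the universal/local bit of the MAC, splice ff:fe into its middle, and OR the resulting 64 bits into the low half of the prefix, so an instance's v6 address and its MAC are mutually derivable. A round-trip sketch (expected output worked out by hand from those rules):

    from nova import utils

    addr = utils.to_global_ipv6('fd00::/48', '02:16:3e:33:44:55')
    print addr                  # expected: fd00::16:3eff:fe33:4455
    print utils.to_mac(addr)    # expected: 02:16:3e:33:44:55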
+ +try: + from nova.vcsversion import version_info +except ImportError: + version_info = {'branch_nick': u'LOCALBRANCH', + 'revision_id': 'LOCALREVISION', + 'revno': 0} + +NOVA_VERSION = ['2011', '1'] +YEAR, COUNT = NOVA_VERSION + +FINAL = False # This becomes true at Release Candidate time + + +def canonical_version_string(): + return '.'.join([YEAR, COUNT]) + + +def version_string(): + if FINAL: + return canonical_version_string() + else: + return '%s-dev' % (canonical_version_string(),) + + +def vcs_version_string(): + return "%s:%s" % (version_info['branch_nick'], version_info['revision_id']) + + +def version_string_with_vcs(): + return "%s-%s" % (canonical_version_string(), vcs_version_string()) diff --git a/nova/virt/connection.py b/nova/virt/connection.py index 61e99944ef38..13181b7303aa 100644 --- a/nova/virt/connection.py +++ b/nova/virt/connection.py @@ -19,15 +19,17 @@ """Abstraction of the underlying virtualization API.""" -import logging import sys from nova import flags +from nova import log as logging from nova.virt import fake from nova.virt import libvirt_conn from nova.virt import xenapi_conn +from nova.virt import hyperv +LOG = logging.getLogger("nova.virt.connection") FLAGS = flags.FLAGS @@ -62,10 +64,12 @@ def get_connection(read_only): conn = libvirt_conn.get_connection(read_only) elif t == 'xenapi': conn = xenapi_conn.get_connection(read_only) + elif t == 'hyperv': + conn = hyperv.get_connection(read_only) else: raise Exception('Unknown connection type "%s"' % t) if conn is None: - logging.error(_('Failed to open connection to the hypervisor')) + LOG.error(_('Failed to open connection to the hypervisor')) sys.exit(1) return conn diff --git a/nova/virt/disk.py b/nova/virt/disk.py new file mode 100644 index 000000000000..c5565abfaf8c --- /dev/null +++ b/nova/virt/disk.py @@ -0,0 +1,186 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +""" +Utility methods to resize, repartition, and modify disk images. + +Includes injection of SSH public keys into authorized_keys file.
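The version module above resolves its VCS details at import time and falls back to placeholder values when nova/vcsversion.py has not been generated at build time; in that fallback case the strings compose like so:

    from nova import version

    version.canonical_version_string()   # '2011.1'
    version.version_string()             # '2011.1-dev', until FINAL flips
    version.version_string_with_vcs()    # '2011.1-LOCALBRANCH:LOCALREVISION'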
+ +""" + +import os +import tempfile +import time + +from nova import exception +from nova import flags +from nova import log as logging +from nova import utils + + +LOG = logging.getLogger('nova.compute.disk') +FLAGS = flags.FLAGS +flags.DEFINE_integer('minimum_root_size', 1024 * 1024 * 1024 * 10, + 'minimum size in bytes of root partition') +flags.DEFINE_integer('block_size', 1024 * 1024 * 256, + 'block_size to use for dd') + + +def extend(image, size): + """Increase image to size""" + file_size = os.path.getsize(image) + if file_size >= size: + return + utils.execute('truncate -s %s %s' % (size, image)) + # NOTE(vish): attempts to resize filesystem + utils.execute('e2fsck -fp %s' % image, check_exit_code=False) + utils.execute('resize2fs %s' % image, check_exit_code=False) + + +def inject_data(image, key=None, net=None, partition=None, nbd=False): + """Injects a ssh key and optionally net data into a disk image. + + it will mount the image as a fully partitioned disk and attempt to inject + into the specified partition number. + + If partition is not specified it mounts the image as a single partition. + + """ + device = _link_device(image, nbd) + try: + if not partition is None: + # create partition + out, err = utils.execute('sudo kpartx -a %s' % device) + if err: + raise exception.Error(_('Failed to load partition: %s') % err) + mapped_device = '/dev/mapper/%sp%s' % (device.split('/')[-1], + partition) + else: + mapped_device = device + + # We can only loopback mount raw images. If the device isn't there, + # it's normally because it's a .vmdk or a .vdi etc + if not os.path.exists(mapped_device): + raise exception.Error('Mapped device was not found (we can' + ' only inject raw disk images): %s' % + mapped_device) + + # Configure ext2fs so that it doesn't auto-check every N boots + out, err = utils.execute('sudo tune2fs -c 0 -i 0 %s' % mapped_device) + + tmpdir = tempfile.mkdtemp() + try: + # mount loopback to dir + out, err = utils.execute( + 'sudo mount %s %s' % (mapped_device, tmpdir)) + if err: + raise exception.Error(_('Failed to mount filesystem: %s') + % err) + + try: + if key: + # inject key file + _inject_key_into_fs(key, tmpdir) + if net: + _inject_net_into_fs(net, tmpdir) + finally: + # unmount device + utils.execute('sudo umount %s' % mapped_device) + finally: + # remove temporary directory + utils.execute('rmdir %s' % tmpdir) + if not partition is None: + # remove partitions + utils.execute('sudo kpartx -d %s' % device) + finally: + _unlink_device(device, nbd) + + +def _link_device(image, nbd): + """Link image to device using loopback or nbd""" + if nbd: + device = _allocate_device() + utils.execute('sudo qemu-nbd -c %s %s' % (device, image)) + # NOTE(vish): this forks into another process, so give it a chance + # to set up before continuuing + for i in xrange(10): + if os.path.exists("/sys/block/%s/pid" % os.path.basename(device)): + return device + time.sleep(1) + raise exception.Error(_('nbd device %s did not show up') % device) + else: + out, err = utils.execute('sudo losetup --find --show %s' % image) + if err: + raise exception.Error(_('Could not attach image to loopback: %s') + % err) + return out.strip() + + +def _unlink_device(device, nbd): + """Unlink image from device using loopback or nbd""" + if nbd: + utils.execute('sudo qemu-nbd -d %s' % device) + _free_device(device) + else: + utils.execute('sudo losetup --detach %s' % device) + + +_DEVICES = ['/dev/nbd%s' % i for i in xrange(16)] + + +def _allocate_device(): + # NOTE(vish): This assumes no other processes 
are allocating nbd devices. + # It may cause a race condition if multiple + # workers are running on a given machine. + while True: + if not _DEVICES: + raise exception.Error(_('No free nbd devices')) + device = _DEVICES.pop() + if not os.path.exists("/sys/block/%s/pid" % os.path.basename(device)): + break + return device + + +def _free_device(device): + _DEVICES.append(device) + + +def _inject_key_into_fs(key, fs): + """Add the given public ssh key to root's authorized_keys. + + key is an ssh key string. + fs is the path to the base of the filesystem into which to inject the key. + """ + sshdir = os.path.join(fs, 'root', '.ssh') + utils.execute('sudo mkdir -p %s' % sshdir) # existing dir doesn't matter + utils.execute('sudo chown root %s' % sshdir) + utils.execute('sudo chmod 700 %s' % sshdir) + keyfile = os.path.join(sshdir, 'authorized_keys') + utils.execute('sudo tee -a %s' % keyfile, '\n' + key.strip() + '\n') + + +def _inject_net_into_fs(net, fs): + """Inject /etc/network/interfaces into the filesystem rooted at fs. + + net is the contents of /etc/network/interfaces. + """ + netdir = os.path.join(os.path.join(fs, 'etc'), 'network') + utils.execute('sudo mkdir -p %s' % netdir) # existing dir doesn't matter + utils.execute('sudo chown root:root %s' % netdir) + utils.execute('sudo chmod 755 %s' % netdir) + netfile = os.path.join(netdir, 'interfaces') + utils.execute('sudo tee %s' % netfile, net) diff --git a/nova/virt/fake.py b/nova/virt/fake.py index 32541f5b48b7..f8b3c78071d5 100644 --- a/nova/virt/fake.py +++ b/nova/virt/fake.py @@ -98,7 +98,7 @@ class FakeConnection(object): the new instance. The work will be done asynchronously. This function returns a - Deferred that allows the caller to detect when it is complete. + task that allows the caller to detect when it is complete. Once this successfully completes, the instance should be running (power_state.RUNNING). @@ -122,7 +122,7 @@ class FakeConnection(object): The second parameter is the name of the snapshot. The work will be done asynchronously. This function returns a - Deferred that allows the caller to detect when it is complete. + task that allows the caller to detect when it is complete. """ pass @@ -134,7 +134,20 @@ class FakeConnection(object): and so the instance is being specified as instance.name. The work will be done asynchronously. This function returns a - Deferred that allows the caller to detect when it is complete. + task that allows the caller to detect when it is complete. + """ + pass + + def set_admin_password(self, instance, new_pass): + """ + Set the root password on the specified instance. + + The first parameter is an instance of nova.compute.service.Instance, + and so the instance is being specified as instance.name. The second + parameter is the value of the new password. + + The work will be done asynchronously. This function returns a + task that allows the caller to detect when it is complete. """ pass @@ -182,7 +195,7 @@ class FakeConnection(object): and so the instance is being specified as instance.name. The work will be done asynchronously. This function returns a - Deferred that allows the caller to detect when it is complete. + task that allows the caller to detect when it is complete.
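Back in disk.py, the NOTE on _allocate_device is worth underlining: _DEVICES is a per-process list, so two workers on the same host can hand out the same /dev/nbdN between the existence check and qemu-nbd attaching. One way to narrow that window is an inter-process file lock held across allocate-and-connect; a minimal sketch (the helper name and lock path are illustrative, not part of this patch):

    import fcntl
    import os


    def _link_nbd_device_locked(image, lockdir='/var/lock'):
        """Allocate an nbd device and connect the image under an flock."""
        lockfile = open(os.path.join(lockdir, 'nova-nbd.lock'), 'w')
        fcntl.flock(lockfile, fcntl.LOCK_EX)
        try:
            device = _allocate_device()
            # Connect while still holding the lock so another worker cannot
            # pick the same device; the /sys pid file still appears
            # asynchronously, so callers should keep the existing poll loop.
            utils.execute('sudo qemu-nbd -c %s %s' % (device, image))
            return device
        finally:
            fcntl.flock(lockfile, fcntl.LOCK_UN)
            lockfile.close()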
""" del self.instances[instance.name] @@ -289,6 +302,62 @@ class FakeConnection(object): def get_console_output(self, instance): return 'FAKE CONSOLE OUTPUT' + def get_ajax_console(self, instance): + return 'http://fakeajaxconsole.com/?token=FAKETOKEN' + + def get_console_pool_info(self, console_type): + return {'address': '127.0.0.1', + 'username': 'fakeuser', + 'password': 'fakepassword'} + + def refresh_security_group_rules(self, security_group_id): + """This method is called after a change to security groups. + + All security groups and their associated rules live in the datastore, + and calling this method should apply the updated rules to instances + running the specified security group. + + An error should be raised if the operation cannot complete. + + """ + return True + + def refresh_security_group_members(self, security_group_id): + """This method is called when a security group is added to an instance. + + This message is sent to the virtualization drivers on hosts that are + running an instance that belongs to a security group that has a rule + that references the security group identified by `security_group_id`. + It is the responsiblity of this method to make sure any rules + that authorize traffic flow with members of the security group are + updated and any new members can communicate, and any removed members + cannot. + + Scenario: + * we are running on host 'H0' and we have an instance 'i-0'. + * instance 'i-0' is a member of security group 'speaks-b' + * group 'speaks-b' has an ingress rule that authorizes group 'b' + * another host 'H1' runs an instance 'i-1' + * instance 'i-1' is a member of security group 'b' + + When 'i-1' launches or terminates we will recieve the message + to update members of group 'b', at which time we will make + any changes needed to the rules for instance 'i-0' to allow + or deny traffic coming from 'i-1', depending on if it is being + added or removed from the group. + + In this scenario, 'i-1' could just as easily have been running on our + host 'H0' and this method would still have been called. The point was + that this method isn't called on the host where instances of that + group are running (as is the case with + :method:`refresh_security_group_rules`) but is called where references + are made to authorizing those instances. + + An error should be raised if the operation cannot complete. + + """ + return True + class FakeInstance(object): diff --git a/nova/virt/hyperv.py b/nova/virt/hyperv.py new file mode 100644 index 000000000000..30dc1c79bae2 --- /dev/null +++ b/nova/virt/hyperv.py @@ -0,0 +1,462 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright (c) 2010 Cloud.com, Inc +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +A connection to Hyper-V . 
+Uses Windows Management Instrumentation (WMI) calls to interact with Hyper-V. +Hyper-V WMI usage: + http://msdn.microsoft.com/en-us/library/cc723875%28v=VS.85%29.aspx +The Hyper-V object model briefly: + The physical computer and its hosted virtual machines are each represented + by the Msvm_ComputerSystem class. + + Each virtual machine is associated with a + Msvm_VirtualSystemGlobalSettingData (vs_gs_data) instance and one or more + Msvm_VirtualSystemSettingData (vmsetting) instances. For each vmsetting + there is a series of Msvm_ResourceAllocationSettingData (rasd) objects. + The rasd objects describe the settings for each device in a VM. + Together, the vs_gs_data, vmsettings and rasds describe the configuration + of the virtual machine. + + Creating new resources such as disks and nics involves cloning a default + rasd object and appropriately modifying the clone and calling the + AddVirtualSystemResources WMI method. + Changing resources such as memory uses the ModifyVirtualSystemResources + WMI method. + +Using the Python WMI library: + Tutorial: + http://timgolden.me.uk/python/wmi/tutorial.html + Hyper-V WMI objects can be retrieved simply by using the class name + of the WMI object and optionally specifying a column to filter the + result set. More complex filters can be formed using WQL (SQL-like) + queries. + The parameters and return tuples of WMI method calls can be gleaned by + examining the doc string. For example: + >>> vs_man_svc.ModifyVirtualSystemResources.__doc__ + ModifyVirtualSystemResources (ComputerSystem, ResourceSettingData[]) + => (Job, ReturnValue)' + When passing setting data (ResourceSettingData) to the WMI method, + an XML representation of the data is passed in using GetText_(1). + Available methods on a service can be determined using methods.keys(): + >>> vs_man_svc.methods.keys() + vmsettings and rasds for a vm can be retrieved using the 'associators' + method with the appropriate return class. + Long running WMI commands generally return a Job (an instance of + Msvm_ConcreteJob) whose state can be polled to determine when it finishes. + +""" + +import os +import time + +from nova import exception +from nova import flags +from nova import log as logging +from nova.auth import manager +from nova.compute import power_state +from nova.virt import images + +wmi = None + + +FLAGS = flags.FLAGS + + +LOG = logging.getLogger('nova.virt.hyperv') + + +HYPERV_POWER_STATE = { + 3: power_state.SHUTDOWN, + 2: power_state.RUNNING, + 32768: power_state.PAUSED, +} + + +REQ_POWER_STATE = { + 'Enabled': 2, + 'Disabled': 3, + 'Reboot': 10, + 'Reset': 11, + 'Paused': 32768, + 'Suspended': 32769, +} + + +WMI_JOB_STATUS_STARTED = 4096 +WMI_JOB_STATE_RUNNING = 4 +WMI_JOB_STATE_COMPLETED = 7 + + +def get_connection(_): + global wmi + if wmi is None: + wmi = __import__('wmi') + return HyperVConnection() + + +class HyperVConnection(object): + def __init__(self): + self._conn = wmi.WMI(moniker='//./root/virtualization') + self._cim_conn = wmi.WMI(moniker='//./root/cimv2') + + def init_host(self): + #FIXME(chiradeep): implement this + LOG.debug(_('In init host')) + pass + + def list_instances(self): + """ Return the names of all the instances known to Hyper-V.
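The WMI access patterns that docstring describes look roughly like this in code (runnable only on a Hyper-V host with the Python wmi package installed, so purely illustrative here):

    import wmi

    conn = wmi.WMI(moniker='//./root/virtualization')

    # Class lookup filtered by a column:
    vms = conn.Msvm_ComputerSystem(ElementName='instance-00000001')

    # WQL for anything more involved:
    defaults = conn.query(
        "SELECT * FROM Msvm_ResourceAllocationSettingData "
        "WHERE InstanceID LIKE '%Default%'")

    # Association traversal, as used throughout the driver below:
    vmsettings = vms[0].associators(
        wmi_result_class='Msvm_VirtualSystemSettingData')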
""" + vms = [v.ElementName \ + for v in self._conn.Msvm_ComputerSystem(['ElementName'])] + return vms + + def spawn(self, instance): + """ Create a new VM and start it.""" + vm = self._lookup(instance.name) + if vm is not None: + raise exception.Duplicate(_('Attempt to create duplicate vm %s') % + instance.name) + + user = manager.AuthManager().get_user(instance['user_id']) + project = manager.AuthManager().get_project(instance['project_id']) + #Fetch the file, assume it is a VHD file. + base_vhd_filename = os.path.join(FLAGS.instances_path, + instance.name) + vhdfile = "%s.vhd" % (base_vhd_filename) + images.fetch(instance['image_id'], vhdfile, user, project) + + try: + self._create_vm(instance) + + self._create_disk(instance['name'], vhdfile) + self._create_nic(instance['name'], instance['mac_address']) + + LOG.debug(_('Starting VM %s '), instance.name) + self._set_vm_state(instance['name'], 'Enabled') + LOG.info(_('Started VM %s '), instance.name) + except Exception as exn: + LOG.exception(_('spawn vm failed: %s'), exn) + self.destroy(instance) + + def _create_vm(self, instance): + """Create a VM but don't start it. """ + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + + vs_gs_data = self._conn.Msvm_VirtualSystemGlobalSettingData.new() + vs_gs_data.ElementName = instance['name'] + (job, ret_val) = vs_man_svc.DefineVirtualSystem( + [], None, vs_gs_data.GetText_(1))[1:] + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + else: + success = (ret_val == 0) + + if not success: + raise Exception(_('Failed to create VM %s'), instance.name) + + LOG.debug(_('Created VM %s...'), instance.name) + vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0] + + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + vmsetting = [s for s in vmsettings + if s.SettingType == 3][0] # avoid snapshots + memsetting = vmsetting.associators( + wmi_result_class='Msvm_MemorySettingData')[0] + #No Dynamic Memory, so reservation, limit and quantity are identical. + mem = long(str(instance['memory_mb'])) + memsetting.VirtualQuantity = mem + memsetting.Reservation = mem + memsetting.Limit = mem + + (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + vm.path_(), [memsetting.GetText_(1)]) + LOG.debug(_('Set memory for vm %s...'), instance.name) + procsetting = vmsetting.associators( + wmi_result_class='Msvm_ProcessorSettingData')[0] + vcpus = long(instance['vcpus']) + procsetting.VirtualQuantity = vcpus + procsetting.Reservation = vcpus + procsetting.Limit = vcpus + + (job, ret_val) = vs_man_svc.ModifyVirtualSystemResources( + vm.path_(), [procsetting.GetText_(1)]) + LOG.debug(_('Set vcpus for vm %s...'), instance.name) + + def _create_disk(self, vm_name, vhdfile): + """Create a disk and attach it to the vm""" + LOG.debug(_('Creating disk for %s by attaching disk file %s'), + vm_name, vhdfile) + #Find the IDE controller for the vm. + vms = self._conn.MSVM_ComputerSystem(ElementName=vm_name) + vm = vms[0] + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + rasds = vmsettings[0].associators( + wmi_result_class='MSVM_ResourceAllocationSettingData') + ctrller = [r for r in rasds + if r.ResourceSubType == 'Microsoft Emulated IDE Controller'\ + and r.Address == "0"] + #Find the default disk drive object for the vm and clone it. 
+ diskdflt = self._conn.query( + "SELECT * FROM Msvm_ResourceAllocationSettingData \ + WHERE ResourceSubType LIKE 'Microsoft Synthetic Disk Drive'\ + AND InstanceID LIKE '%Default%'")[0] + diskdrive = self._clone_wmi_obj( + 'Msvm_ResourceAllocationSettingData', diskdflt) + #Set the IDE ctrller as parent. + diskdrive.Parent = ctrller[0].path_() + diskdrive.Address = 0 + #Add the cloned disk drive object to the vm. + new_resources = self._add_virt_resource(diskdrive, vm) + if new_resources is None: + raise Exception(_('Failed to add diskdrive to VM %s'), + vm_name) + diskdrive_path = new_resources[0] + LOG.debug(_('New disk drive path is %s'), diskdrive_path) + #Find the default VHD disk object. + vhddefault = self._conn.query( + "SELECT * FROM Msvm_ResourceAllocationSettingData \ + WHERE ResourceSubType LIKE 'Microsoft Virtual Hard Disk' AND \ + InstanceID LIKE '%Default%' ")[0] + + #Clone the default and point it to the image file. + vhddisk = self._clone_wmi_obj( + 'Msvm_ResourceAllocationSettingData', vhddefault) + #Set the new drive as the parent. + vhddisk.Parent = diskdrive_path + vhddisk.Connection = [vhdfile] + + #Add the new vhd object as a virtual hard disk to the vm. + new_resources = self._add_virt_resource(vhddisk, vm) + if new_resources is None: + raise Exception(_('Failed to add vhd file to VM %s'), + vm_name) + LOG.info(_('Created disk for %s'), vm_name) + + def _create_nic(self, vm_name, mac): + """Create a (emulated) nic and attach it to the vm""" + LOG.debug(_('Creating nic for %s '), vm_name) + #Find the vswitch that is connected to the physical nic. + vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) + extswitch = self._find_external_network() + vm = vms[0] + switch_svc = self._conn.Msvm_VirtualSwitchManagementService()[0] + #Find the default nic and clone it to create a new nic for the vm. + #Use Msvm_SyntheticEthernetPortSettingData for Windows or Linux with + #Linux Integration Components installed. + emulatednics_data = self._conn.Msvm_EmulatedEthernetPortSettingData() + default_nic_data = [n for n in emulatednics_data + if n.InstanceID.rfind('Default') > 0] + new_nic_data = self._clone_wmi_obj( + 'Msvm_EmulatedEthernetPortSettingData', + default_nic_data[0]) + #Create a port on the vswitch. + (new_port, ret_val) = switch_svc.CreateSwitchPort(vm_name, vm_name, + "", extswitch.path_()) + if ret_val != 0: + LOG.error(_('Failed creating a port on the external vswitch')) + raise Exception(_('Failed creating port for %s'), + vm_name) + LOG.debug(_("Created switch port %s on switch %s"), + vm_name, extswitch.path_()) + #Connect the new nic to the new port. + new_nic_data.Connection = [new_port] + new_nic_data.ElementName = vm_name + ' nic' + new_nic_data.Address = ''.join(mac.split(':')) + new_nic_data.StaticMacAddress = 'TRUE' + #Add the new nic to the vm. 
+ new_resources = self._add_virt_resource(new_nic_data, vm) + if new_resources is None: + raise Exception(_('Failed to add nic to VM %s'), + vm_name) + LOG.info(_("Created nic for %s "), vm_name) + + def _add_virt_resource(self, res_setting_data, target_vm): + """Add a new resource (disk/nic) to the VM""" + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + (job, new_resources, ret_val) = vs_man_svc.\ + AddVirtualSystemResources([res_setting_data.GetText_(1)], + target_vm.path_()) + success = True + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + else: + success = (ret_val == 0) + if success: + return new_resources + else: + return None + + #TODO: use the reactor to poll instead of sleep + def _check_job_status(self, jobpath): + """Poll WMI job state for completion""" + #Jobs have a path of the form: + #\\WIN-P5IG7367DAG\root\virtualization:Msvm_ConcreteJob.InstanceID= + #"8A496B9C-AF4D-4E98-BD3C-1128CD85320D" + inst_id = jobpath.split('=')[1].strip('"') + jobs = self._conn.Msvm_ConcreteJob(InstanceID=inst_id) + if len(jobs) == 0: + return False + job = jobs[0] + while job.JobState == WMI_JOB_STATE_RUNNING: + time.sleep(0.1) + job = self._conn.Msvm_ConcreteJob(InstanceID=inst_id)[0] + if job.JobState != WMI_JOB_STATE_COMPLETED: + LOG.debug(_("WMI job failed: %s"), job.ErrorSummaryDescription) + return False + LOG.debug(_("WMI job succeeded: %s, Elapsed=%s "), job.Description, + job.ElapsedTime) + return True + + def _find_external_network(self): + """Find the vswitch that is connected to the physical nic. + Assumes only one physical nic on the host + """ + #If there are no physical nics connected to networks, return. + bound = self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE') + if len(bound) == 0: + return None + return self._conn.Msvm_ExternalEthernetPort(IsBound='TRUE')[0]\ + .associators(wmi_result_class='Msvm_SwitchLANEndpoint')[0]\ + .associators(wmi_result_class='Msvm_SwitchPort')[0]\ + .associators(wmi_result_class='Msvm_VirtualSwitch')[0] + + def _clone_wmi_obj(self, wmi_class, wmi_obj): + """Clone a WMI object""" + cl = self._conn.__getattr__(wmi_class) # get the class + newinst = cl.new() + #Copy the properties from the original. + for prop in wmi_obj._properties: + newinst.Properties_.Item(prop).Value =\ + wmi_obj.Properties_.Item(prop).Value + return newinst + + def reboot(self, instance): + """Reboot the specified instance.""" + vm = self._lookup(instance.name) + if vm is None: + raise exception.NotFound('instance not present %s' % instance.name) + self._set_vm_state(instance.name, 'Reboot') + + def destroy(self, instance): + """Destroy the VM. Also destroy the associated VHD disk files""" + LOG.debug(_("Got request to destroy vm %s"), instance.name) + vm = self._lookup(instance.name) + if vm is None: + return + vm = self._conn.Msvm_ComputerSystem(ElementName=instance.name)[0] + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + #Stop the VM first. + self._set_vm_state(instance.name, 'Disabled') + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + rasds = vmsettings[0].associators( + wmi_result_class='MSVM_ResourceAllocationSettingData') + disks = [r for r in rasds \ + if r.ResourceSubType == 'Microsoft Virtual Hard Disk'] + diskfiles = [] + #Collect disk file information before destroying the VM. + for disk in disks: + diskfiles.extend([c for c in disk.Connection]) + #Nuke the VM. Does not destroy disks. 
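On the TODO above _check_job_status: time.sleep() blocks the whole service process when run under eventlet, so a greener variant would yield between polls. A sketch of what that could look like (assumes the driver runs under eventlet like the rest of Nova; not part of this patch):

    from eventlet import greenthread


    def _wait_for_job(conn, inst_id):
        """Poll an Msvm_ConcreteJob without blocking other greenthreads."""
        job = conn.Msvm_ConcreteJob(InstanceID=inst_id)[0]
        while job.JobState == WMI_JOB_STATE_RUNNING:
            greenthread.sleep(0.1)  # cooperative, unlike time.sleep()
            job = conn.Msvm_ConcreteJob(InstanceID=inst_id)[0]
        return job.JobState == WMI_JOB_STATE_COMPLETED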
+ (job, ret_val) = vs_man_svc.DestroyVirtualSystem(vm.path_()) + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + elif ret_val == 0: + success = True + if not success: + raise Exception(_('Failed to destroy vm %s') % instance.name) + #Delete associated vhd disk files. + for disk in diskfiles: + vhdfile = self._cim_conn.CIM_DataFile(Name=disk) + for vf in vhdfile: + vf.Delete() + LOG.debug(_("Del: disk %s vm %s"), vhdfile, instance.name) + + def get_info(self, instance_id): + """Get information about the VM""" + vm = self._lookup(instance_id) + if vm is None: + raise exception.NotFound('instance not present %s' % instance_id) + vm = self._conn.Msvm_ComputerSystem(ElementName=instance_id)[0] + vs_man_svc = self._conn.Msvm_VirtualSystemManagementService()[0] + vmsettings = vm.associators( + wmi_result_class='Msvm_VirtualSystemSettingData') + settings_paths = [v.path_() for v in vmsettings] + #See http://msdn.microsoft.com/en-us/library/cc160706%28VS.85%29.aspx + summary_info = vs_man_svc.GetSummaryInformation( + [4, 100, 103, 105], settings_paths)[1] + info = summary_info[0] + LOG.debug(_("Got Info for vm %s: state=%s, mem=%s, num_cpu=%s, \ + cpu_time=%s"), instance_id, + str(HYPERV_POWER_STATE[info.EnabledState]), + str(info.MemoryUsage), + str(info.NumberOfProcessors), + str(info.UpTime)) + + return {'state': HYPERV_POWER_STATE[info.EnabledState], + 'max_mem': info.MemoryUsage, + 'mem': info.MemoryUsage, + 'num_cpu': info.NumberOfProcessors, + 'cpu_time': info.UpTime} + + def _lookup(self, i): + vms = self._conn.Msvm_ComputerSystem(ElementName=i) + n = len(vms) + if n == 0: + return None + elif n > 1: + raise Exception(_('duplicate name found: %s') % i) + else: + return vms[0].ElementName + + def _set_vm_state(self, vm_name, req_state): + """Set the desired state of the VM""" + vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name) + if len(vms) == 0: + return False + (job, ret_val) = vms[0].RequestStateChange(REQ_POWER_STATE[req_state]) + success = False + if ret_val == WMI_JOB_STATUS_STARTED: + success = self._check_job_status(job) + elif ret_val == 0: + success = True + elif ret_val == 32775: + #Invalid state for current operation. Typically means it is + #already in the state requested + success = True + if success: + LOG.info(_("Successfully changed vm state of %s to %s"), vm_name, + req_state) + else: + LOG.error(_("Failed to change vm state of %s to %s"), vm_name, + req_state) + raise Exception(_("Failed to change vm state of %s to %s"), + vm_name, req_state) + + def attach_volume(self, instance_name, device_path, mountpoint): + vm = self._lookup(instance_name) + if vm is None: + raise exception.NotFound('Cannot attach volume to missing %s vm' % + instance_name) + + def detach_volume(self, instance_name, mountpoint): + vm = self._lookup(instance_name) + if vm is None: + raise exception.NotFound('Cannot detach volume from missing %s ' % + instance_name) diff --git a/nova/virt/images.py b/nova/virt/images.py index 1c9b2e093401..ecf0e5efbc48 100644 --- a/nova/virt/images.py +++ b/nova/virt/images.py @@ -22,10 +22,14 @@ Handling of VM disk images. 
""" import os.path +import shutil +import sys import time +import urllib2 import urlparse from nova import flags +from nova import log as logging from nova import utils from nova.auth import manager from nova.auth import signer @@ -36,6 +40,8 @@ FLAGS = flags.FLAGS flags.DEFINE_bool('use_s3', True, 'whether to get images from s3 or use local copy') +LOG = logging.getLogger('nova.virt.images') + def fetch(image, path, user, project): if FLAGS.use_s3: @@ -45,6 +51,25 @@ def fetch(image, path, user, project): return f(image, path, user, project) +def _fetch_image_no_curl(url, path, headers): + request = urllib2.Request(url) + for (k, v) in headers.iteritems(): + request.add_header(k, v) + + def urlretrieve(urlfile, fpath): + chunk = 1 * 1024 * 1024 + f = open(fpath, "wb") + while 1: + data = urlfile.read(chunk) + if not data: + break + f.write(data) + + urlopened = urllib2.urlopen(request) + urlretrieve(urlopened, path) + LOG.debug(_("Finished retreving %s -- placed in %s"), url, path) + + def _fetch_s3_image(image, path, user, project): url = image_url(image) @@ -61,18 +86,24 @@ def _fetch_s3_image(image, path, user, project): url_path) headers['Authorization'] = 'AWS %s:%s' % (access, signature) - cmd = ['/usr/bin/curl', '--fail', '--silent', url] - for (k, v) in headers.iteritems(): - cmd += ['-H', '"%s: %s"' % (k, v)] + if sys.platform.startswith('win'): + return _fetch_image_no_curl(url, path, headers) + else: + cmd = ['/usr/bin/curl', '--fail', '--silent', url] + for (k, v) in headers.iteritems(): + cmd += ['-H', '\'%s: %s\'' % (k, v)] - cmd += ['-o', path] - cmd_out = ' '.join(cmd) - return utils.execute(cmd_out) + cmd += ['-o', path] + cmd_out = ' '.join(cmd) + return utils.execute(cmd_out) def _fetch_local_image(image, path, user, project): - source = _image_path('%s/image' % image) - return utils.execute('cp %s %s' % (source, path)) + source = _image_path(os.path.join(image, 'image')) + if sys.platform.startswith('win'): + return shutil.copy(source, path) + else: + return utils.execute('cp %s %s' % (source, path)) def _image_path(path): diff --git a/nova/virt/libvirt.xml.template b/nova/virt/libvirt.xml.template index 3fb2243dacbd..8139c3620bf2 100644 --- a/nova/virt/libvirt.xml.template +++ b/nova/virt/libvirt.xml.template @@ -7,28 +7,28 @@ #set $disk_bus = 'uml' uml /usr/bin/linux - /dev/ubda1 + /dev/ubda #else #if $type == 'xen' #set $disk_prefix = 'sd' #set $disk_bus = 'scsi' linux - /dev/xvda1 + /dev/xvda #else #set $disk_prefix = 'vd' #set $disk_bus = 'virtio' hvm - #end if + #end if #if $getVar('rescue', False) - ${basepath}/rescue-kernel - ${basepath}/rescue-ramdisk + ${basepath}/kernel.rescue + ${basepath}/ramdisk.rescue #else #if $getVar('kernel', None) ${kernel} #if $type == 'xen' ro #else - root=/dev/vda1 console=ttyS0 + root=/dev/vda console=ttyS0 #end if #if $getVar('ramdisk', None) ${ramdisk} @@ -46,18 +46,28 @@ #if $getVar('rescue', False) - + + + #else + + #if $getVar('local', False) + + + + + + #end if #end if @@ -66,14 +76,28 @@ + #if $getVar('extra_params', False) ${extra_params} #end if + + + + + + + + + + + + + diff --git a/nova/virt/libvirt_conn.py b/nova/virt/libvirt_conn.py index 00edfbdc8ddd..f5b0bd365ea0 100644 --- a/nova/virt/libvirt_conn.py +++ b/nova/virt/libvirt_conn.py @@ -36,9 +36,13 @@ Supports KVM, QEMU, UML, and XEN. 
""" -import logging import os import shutil +import random +import subprocess +import uuid +from xml.dom import minidom + from eventlet import greenthread from eventlet import event @@ -50,18 +54,20 @@ from nova import context from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import utils #from nova.api import context from nova.auth import manager -from nova.compute import disk from nova.compute import instance_types from nova.compute import power_state +from nova.virt import disk from nova.virt import images libvirt = None libxml2 = None Template = None +LOG = logging.getLogger('nova.virt.libvirt_conn') FLAGS = flags.FLAGS # TODO(vish): These flags should probably go into a shared location @@ -85,6 +91,15 @@ flags.DEFINE_string('libvirt_uri', flags.DEFINE_bool('allow_project_net_traffic', True, 'Whether to allow in project network traffic') +flags.DEFINE_bool('use_cow_images', + True, + 'Whether to use cow images') +flags.DEFINE_string('ajaxterm_portrange', + '10000-12000', + 'Range of ports that ajaxterm should randomly try to bind') +flags.DEFINE_string('firewall_driver', + 'nova.virt.libvirt_conn.IptablesFirewallDriver', + 'Firewall driver (defaults to iptables)') def get_connection(read_only): @@ -115,6 +130,16 @@ def _get_net_and_mask(cidr): return str(net.net()), str(net.netmask()) +def _get_net_and_prefixlen(cidr): + net = IPy.IP(cidr) + return str(net.net()), str(net.prefixlen()) + + +def _get_ip_version(cidr): + net = IPy.IP(cidr) + return int(net.version()) + + class LibvirtConnection(object): def __init__(self, read_only): @@ -124,16 +149,24 @@ class LibvirtConnection(object): self._wrapped_conn = None self.read_only = read_only - def init_host(self): - NWFilterFirewall(self._conn).setup_base_nwfilters() + self.nwfilter = NWFilterFirewall(self._get_connection) - @property - def _conn(self): + if not FLAGS.firewall_driver: + self.firewall_driver = self.nwfilter + self.nwfilter.handle_security_groups = True + else: + self.firewall_driver = utils.import_object(FLAGS.firewall_driver) + + def init_host(self): + pass + + def _get_connection(self): if not self._wrapped_conn or not self._test_connection(): - logging.debug(_('Connecting to libvirt: %s') % self.libvirt_uri) + LOG.debug(_('Connecting to libvirt: %s'), self.libvirt_uri) self._wrapped_conn = self._connect(self.libvirt_uri, self.read_only) return self._wrapped_conn + _conn = property(_get_connection) def _test_connection(self): try: @@ -142,7 +175,7 @@ class LibvirtConnection(object): except libvirt.libvirtError as e: if e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and \ e.get_error_domain() == libvirt.VIR_FROM_REMOTE: - logging.debug(_('Connection to libvirt broke')) + LOG.debug(_('Connection to libvirt broke')) return False raise @@ -177,45 +210,34 @@ class LibvirtConnection(object): pass # If the instance is already terminated, we're still happy - done = event.Event() - # We'll save this for when we do shutdown, # instead of destroy - but destroy returns immediately timer = utils.LoopingCall(f=None) - def _wait_for_shutdown(): + while True: try: state = self.get_info(instance['name'])['state'] db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.SHUTDOWN: - timer.stop() + break except Exception: db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) - timer.stop() + break - timer.f = _wait_for_shutdown - timer_done = timer.start(interval=0.5, now=True) + 
self.firewall_driver.unfilter_instance(instance) - # NOTE(termie): this is strictly superfluous (we could put the - # cleanup code in the timer), but this emulates the - # previous model so I am keeping it around until - # everything has been vetted a bit - def _wait_for_timer(): - timer_done.wait() - if cleanup: - self._cleanup(instance) - done.send() + if cleanup: + self._cleanup(instance) - greenthread.spawn(_wait_for_timer) - return done + return True def _cleanup(self, instance): target = os.path.join(FLAGS.instances_path, instance['name']) - logging.info(_('instance %s: deleting instance files %s'), - instance['name'], target) + LOG.info(_('instance %s: deleting instance files %s'), + instance['name'], target) if os.path.exists(target): shutil.rmtree(target) @@ -223,11 +245,24 @@ class LibvirtConnection(object): def attach_volume(self, instance_name, device_path, mountpoint): virt_dom = self._conn.lookupByName(instance_name) mount_device = mountpoint.rpartition("/")[2] - xml = """ - - - - """ % (device_path, mount_device) + if device_path.startswith('/dev/'): + xml = """ + + + + """ % (device_path, mount_device) + elif ':' in device_path: + (protocol, name) = device_path.split(':') + xml = """ + + + + """ % (protocol, + name, + mount_device) + else: + raise exception.Invalid(_("Invalid device path %s") % device_path) + virt_dom.attachDevice(xml) def _get_disk_xml(self, xml, device): @@ -260,7 +295,7 @@ class LibvirtConnection(object): virt_dom.detachDevice(xml) @exception.wrap_exception - def snapshot(self, instance, name): + def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance """ raise NotImplementedError( _("Instance snapshotting is not supported for libvirt" @@ -279,10 +314,10 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: rebooted'), instance['name']) + LOG.debug(_('instance %s: rebooted'), instance['name']) timer.stop() except Exception, exn: - logging.error(_('_wait_for_reboot failed: %s'), exn) + LOG.exception(_('_wait_for_reboot failed: %s'), exn) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -315,7 +350,7 @@ class LibvirtConnection(object): rescue_images = {'image_id': FLAGS.rescue_image_id, 'kernel_id': FLAGS.rescue_kernel_id, 'ramdisk_id': FLAGS.rescue_ramdisk_id} - self._create_image(instance, xml, 'rescue-', rescue_images) + self._create_image(instance, xml, '.rescue', rescue_images) self._conn.createXML(xml, 0) timer = utils.LoopingCall(f=None) @@ -325,10 +360,10 @@ class LibvirtConnection(object): state = self.get_info(instance['name'])['state'] db.instance_set_state(None, instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: rescued'), instance['name']) + LOG.debug(_('instance %s: rescued'), instance['name']) timer.stop() except Exception, exn: - logging.error(_('_wait_for_rescue failed: %s'), exn) + LOG.exception(_('_wait_for_rescue failed: %s'), exn) db.instance_set_state(None, instance['id'], power_state.SHUTDOWN) @@ -350,10 +385,12 @@ class LibvirtConnection(object): instance['id'], power_state.NOSTATE, 'launching') - NWFilterFirewall(self._conn).setup_nwfilters_for_instance(instance) + self.nwfilter.setup_basic_filtering(instance) + self.firewall_driver.prepare_instance_filter(instance) self._create_image(instance, xml) self._conn.createXML(xml, 0) - logging.debug(_("instance %s: is running"), instance['name']) + 
LOG.debug(_("instance %s: is running"), instance['name']) + self.firewall_driver.apply_instance_filter(instance) timer = utils.LoopingCall(f=None) @@ -363,11 +400,11 @@ class LibvirtConnection(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('instance %s: booted'), instance['name']) + LOG.debug(_('instance %s: booted'), instance['name']) timer.stop() except: - logging.exception(_('instance %s: failed to boot'), - instance['name']) + LOG.exception(_('instance %s: failed to boot'), + instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -377,11 +414,11 @@ class LibvirtConnection(object): return timer.start(interval=0.5, now=True) def _flush_xen_console(self, virsh_output): - logging.info('virsh said: %r' % (virsh_output,)) + LOG.info(_('virsh said: %r'), virsh_output) virsh_output = virsh_output[0].strip() if virsh_output.startswith('/dev/'): - logging.info(_('cool, it\'s a device')) + LOG.info(_('cool, it\'s a device')) out, err = utils.execute("sudo dd if=%s iflag=nonblock" % virsh_output, check_exit_code=False) return out @@ -389,7 +426,7 @@ class LibvirtConnection(object): return '' def _append_to_file(self, data, fpath): - logging.info(_('data: %r, fpath: %r') % (data, fpath)) + LOG.info(_('data: %r, fpath: %r'), data, fpath) fp = open(fpath, 'a+') fp.write(data) return fpath @@ -397,7 +434,7 @@ class LibvirtConnection(object): def _dump_file(self, fpath): fp = open(fpath, 'r+') contents = fp.read() - logging.info('Contents: %r' % (contents,)) + LOG.info(_('Contents of file %s: %r'), fpath, contents) return contents @exception.wrap_exception @@ -418,25 +455,100 @@ class LibvirtConnection(object): return self._dump_file(fpath) - def _create_image(self, inst, libvirt_xml, prefix='', disk_images=None): + @exception.wrap_exception + def get_ajax_console(self, instance): + def get_open_port(): + start_port, end_port = FLAGS.ajaxterm_portrange.split("-") + for i in xrange(0, 100): # don't loop forever + port = random.randint(int(start_port), int(end_port)) + # netcat will exit with 0 only if the port is in use, + # so a nonzero return value implies it is unused + cmd = 'netcat 0.0.0.0 %s -w 1 \n" "\n") % (net, mask) + "value=\"%s\" />\n" + "\n" + "\n") % \ + (net, mask, net_v6, prefixlen_v6) + else: + net, mask = _get_net_and_mask(network['cidr']) + extra_params = ("\n" + "\n") % \ + (net, mask) else: extra_params = "\n" + if FLAGS.use_cow_images: + driver_type = 'qcow2' + else: + driver_type = 'raw' xml_info = {'type': FLAGS.libvirt_type, 'name': instance['name'], @@ -557,8 +698,11 @@ class LibvirtConnection(object): 'mac_address': instance['mac_address'], 'ip_address': ip_address, 'dhcp_server': dhcp_server, + 'ra_server': ra_server, 'extra_params': extra_params, - 'rescue': rescue} + 'rescue': rescue, + 'local': instance_type['local_gb'], + 'driver_type': driver_type} if not rescue: if instance['kernel_id']: xml_info['kernel'] = xml_info['basepath'] + "/kernel" @@ -569,7 +713,7 @@ class LibvirtConnection(object): xml_info['disk'] = xml_info['basepath'] + "/disk" xml = str(Template(self.libvirt_xml, searchList=[xml_info])) - logging.debug(_('instance %s: finished toXML method'), + LOG.debug(_('instance %s: finished toXML method'), instance['name']) return xml @@ -690,18 +834,67 @@ class LibvirtConnection(object): domain = self._conn.lookupByName(instance_name) return domain.interfaceStats(interface) - def refresh_security_group(self, 
security_group_id): - fw = NWFilterFirewall(self._conn) - fw.ensure_security_group_filter(security_group_id) + def get_console_pool_info(self, console_type): + #TODO(mdragon): console proxy should be implemented for libvirt, + # in case someone wants to use it with kvm or + # such. For now return fake data. + return {'address': '127.0.0.1', + 'username': 'fakeuser', + 'password': 'fakepassword'} + + def refresh_security_group_rules(self, security_group_id): + self.firewall_driver.refresh_security_group_rules(security_group_id) + + def refresh_security_group_members(self, security_group_id): + self.firewall_driver.refresh_security_group_members(security_group_id) -class NWFilterFirewall(object): +class FirewallDriver(object): + def prepare_instance_filter(self, instance): + """Prepare filters for the instance. + + At this point, the instance isn't running yet.""" + raise NotImplementedError() + + def unfilter_instance(self, instance): + """Stop filtering instance""" + raise NotImplementedError() + + def apply_instance_filter(self, instance): + """Apply instance filter. + + Once this method returns, the instance should be firewalled + appropriately. This method should as far as possible be a + no-op. It's vastly preferred to get everything set up in + prepare_instance_filter. + """ + raise NotImplementedError() + + def refresh_security_group_rules(self, security_group_id): + """Refresh security group rules from data store + + Gets called when a rule has been added to or removed from + the security group.""" + raise NotImplementedError() + + def refresh_security_group_members(self, security_group_id): + """Refresh security group members from data store + + Gets called when an instance gets added to or removed from + the security group.""" + raise NotImplementedError() + + +class NWFilterFirewall(FirewallDriver): """ This class implements a network filtering mechanism versatile enough for EC2 style Security Group filtering by leveraging libvirt's nwfilter. First, all instances get a filter ("nova-base-filter") applied. + This filter provides some basic security such as protection against + MAC spoofing, IP spoofing, and ARP spoofing. + This filter drops all incoming ipv4 and ipv6 connections. Outgoing connections are never blocked. @@ -735,39 +928,92 @@ class NWFilterFirewall(object): (*) This sentence brought to you by the redundancy department of redundancy. + """ def __init__(self, get_connection): - self._conn = get_connection + self._libvirt_get_connection = get_connection + self.static_filters_configured = False + self.handle_security_groups = False - nova_base_filter = ''' - 26717364-50cf-42d1-8185-29bf893ab110 - - - - - - - - ''' + def _get_connection(self): + return self._libvirt_get_connection() + _conn = property(_get_connection) - nova_dhcp_filter = ''' - 891e4787-e5c0-d59b-cbd6-41bc3c6b36fc - one, so it uses + ebtables to allow traffic through. Without a corresponding rule in + iptables, it'll get blocked anyway.""" + + return ''' + 891e4787-e5c0-d59b-cbd6-41bc3c6b36fc + + + + + + + ''' + + def nova_ra_filter(self): + return ''' + d707fa71-4fb5-4b27-9ab7-ba5ca19c8804 + - - - - + ''' + def setup_basic_filtering(self, instance): + """Set up basic filtering (MAC, IP, and ARP spoofing protection)""" + logging.info('called setup_basic_filtering in nwfilter') + + if self.handle_security_groups: + # No point in setting up a filter set that we'll be overriding + # anyway. 
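The FirewallDriver base class above is the new pluggable seam selected by --firewall_driver; a driver only has to supply five hooks. For a sense of scale, a do-nothing driver satisfying the interface (hypothetical, not shipped in this patch) is just:

    class NoopFirewallDriver(FirewallDriver):
        """Accept-everything driver: satisfies the interface, filters
        nothing."""

        def prepare_instance_filter(self, instance):
            pass

        def apply_instance_filter(self, instance):
            pass

        def unfilter_instance(self, instance):
            pass

        def refresh_security_group_rules(self, security_group_id):
            pass

        def refresh_security_group_members(self, security_group_id):
            pass

Pointing --firewall_driver at such a class would disable host-side filtering entirely, which is occasionally useful in throwaway test environments but never in production.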
+ return + + logging.info('ensuring static filters') + self._ensure_static_filters() + + instance_filter_name = self._instance_filter_name(instance) + self._define_filter(self._filter_container(instance_filter_name, + ['nova-base'])) + + def _ensure_static_filters(self): + if self.static_filters_configured: + return + + self._define_filter(self._filter_container('nova-base', + ['no-mac-spoofing', + 'no-ip-spoofing', + 'no-arp-spoofing', + 'allow-dhcp-server'])) + self._define_filter(self.nova_base_ipv4_filter) + self._define_filter(self.nova_base_ipv6_filter) + self._define_filter(self.nova_dhcp_filter) + self._define_filter(self.nova_ra_filter) + self._define_filter(self.nova_vpn_filter) + if FLAGS.allow_project_net_traffic: + self._define_filter(self.nova_project_filter) + if FLAGS.use_ipv6: + self._define_filter(self.nova_project_filter_v6) + + self.static_filters_configured = True + + def _filter_container(self, name, filters): + xml = '''%s''' % ( + name, + ''.join(["" % (f,) for f in filters])) + return xml + nova_vpn_filter = ''' 2086015e-cf03-11df-8c5d-080027c27973 @@ -780,7 +1026,7 @@ class NWFilterFirewall(object): retval = "" for protocol in ['tcp', 'udp', 'icmp']: for direction, action, priority in [('out', 'accept', 399), - ('inout', 'drop', 400)]: + ('in', 'drop', 400)]: retval += """ <%s /> """ % (action, direction, @@ -790,13 +1036,13 @@ class NWFilterFirewall(object): def nova_base_ipv6_filter(self): retval = "" - for protocol in ['tcp', 'udp', 'icmp']: + for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: for direction, action, priority in [('out', 'accept', 399), - ('inout', 'drop', 400)]: + ('in', 'drop', 400)]: retval += """ - <%s-ipv6 /> + <%s /> """ % (action, direction, - priority, protocol) + priority, protocol) retval += '' return retval @@ -809,50 +1055,73 @@ class NWFilterFirewall(object): retval += '' return retval + def nova_project_filter_v6(self): + retval = "" + for protocol in ['tcp-ipv6', 'udp-ipv6', 'icmpv6']: + retval += """ + <%s srcipaddr='$PROJNETV6' + srcipmask='$PROJMASKV6' /> + """ % (protocol) + retval += '' + return retval + def _define_filter(self, xml): if callable(xml): xml = xml() - # execute in a native thread and block current greenthread until done tpool.execute(self._conn.nwfilterDefineXML, xml) - def setup_base_nwfilters(self): - self._define_filter(self.nova_base_ipv4_filter) - self._define_filter(self.nova_base_ipv6_filter) - self._define_filter(self.nova_dhcp_filter) - self._define_filter(self.nova_base_filter) - self._define_filter(self.nova_vpn_filter) - if FLAGS.allow_project_net_traffic: - self._define_filter(self.nova_project_filter) + def unfilter_instance(self, instance): + # Nothing to do + pass - def setup_nwfilters_for_instance(self, instance): + def prepare_instance_filter(self, instance): """ Creates an NWFilter for the given instance. In the process, it makes sure the filters for the security groups as well as the base filter are all in place. 
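_filter_container composes a libvirt nwfilter document: a root-chain filter element whose body is one filterref per child filter. A reconstruction of the helper and its output, based on libvirt's nwfilter schema (treat the exact markup as an assumption):

    def _filter_container(name, filters):
        # A <filter> on the root chain containing one <filterref/> per child.
        xml = '''<filter name='%s' chain='root'>%s</filter>''' % (
            name,
            ''.join(["<filterref filter='%s'/>" % (f,) for f in filters]))
        return xml

    # _filter_container('nova-base', ['no-mac-spoofing', 'no-ip-spoofing'])
    # -> "<filter name='nova-base' chain='root'>
    #       <filterref filter='no-mac-spoofing'/>
    #       <filterref filter='no-ip-spoofing'/>
    #     </filter>"  (whitespace added here for readability)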
""" - - nwfilter_xml = ("\n") % instance['name'] - if instance['image_id'] == FLAGS.vpn_image_id: - nwfilter_xml += " \n" + base_filter = 'nova-vpn' else: - nwfilter_xml += " \n" + base_filter = 'nova-base' + + instance_filter_name = self._instance_filter_name(instance) + instance_secgroup_filter_name = '%s-secgroup' % (instance_filter_name,) + instance_filter_children = [base_filter, instance_secgroup_filter_name] + instance_secgroup_filter_children = ['nova-base-ipv4', + 'nova-base-ipv6', + 'nova-allow-dhcp-server'] + if FLAGS.use_ipv6: + instance_secgroup_filter_children += ['nova-allow-ra-server'] + + ctxt = context.get_admin_context() if FLAGS.allow_project_net_traffic: - nwfilter_xml += " \n" + instance_filter_children += ['nova-project'] + if FLAGS.use_ipv6: + instance_filter_children += ['nova-project-v6'] - for security_group in instance.security_groups: - self.ensure_security_group_filter(security_group['id']) + for security_group in db.security_group_get_by_instance(ctxt, + instance['id']): - nwfilter_xml += (" \n") % security_group['id'] - nwfilter_xml += "" + self.refresh_security_group_rules(security_group['id']) - self._define_filter(nwfilter_xml) + instance_secgroup_filter_children += [('nova-secgroup-%s' % + security_group['id'])] - def ensure_security_group_filter(self, security_group_id): + self._define_filter( + self._filter_container(instance_secgroup_filter_name, + instance_secgroup_filter_children)) + + self._define_filter( + self._filter_container(instance_filter_name, + instance_filter_children)) + + return + + def refresh_security_group_rules(self, security_group_id): return self._define_filter( self.security_group_to_nwfilter_xml(security_group_id)) @@ -860,19 +1129,26 @@ class NWFilterFirewall(object): security_group = db.security_group_get(context.get_admin_context(), security_group_id) rule_xml = "" + v6protocol = {'tcp': 'tcp-ipv6', 'udp': 'udp-ipv6', 'icmp': 'icmpv6'} for rule in security_group.rules: rule_xml += "" if rule.cidr: - net, mask = _get_net_and_mask(rule.cidr) - rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ - (rule.protocol, net, mask) + version = _get_ip_version(rule.cidr) + if(FLAGS.use_ipv6 and version == 6): + net, prefixlen = _get_net_and_prefixlen(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (v6protocol[rule.protocol], net, prefixlen) + else: + net, mask = _get_net_and_mask(rule.cidr) + rule_xml += "<%s srcipaddr='%s' srcipmask='%s' " % \ + (rule.protocol, net, mask) if rule.protocol in ['tcp', 'udp']: rule_xml += "dstportstart='%s' dstportend='%s' " % \ (rule.from_port, rule.to_port) elif rule.protocol == 'icmp': - logging.info('rule.protocol: %r, rule.from_port: %r, ' - 'rule.to_port: %r' % - (rule.protocol, rule.from_port, rule.to_port)) + LOG.info('rule.protocol: %r, rule.from_port: %r, ' + 'rule.to_port: %r', rule.protocol, + rule.from_port, rule.to_port) if rule.from_port != -1: rule_xml += "type='%s' " % rule.from_port if rule.to_port != -1: @@ -880,6 +1156,214 @@ class NWFilterFirewall(object): rule_xml += '/>\n' rule_xml += "\n" - xml = "%s" % \ - (security_group_id, rule_xml,) + xml = " %s"), msg or "", pformat(_db_content)) + LOG.debug(_("%s: _db_content => %s"), msg or "", pformat(_db_content)) def reset(): for c in _CLASSES: _db_content[c] = {} create_host('fake') + create_vm('fake', 'Running', is_a_template=False, is_control_domain=True) def create_host(name_label): @@ -134,14 +137,21 @@ def create_vdi(name_label, read_only, sr_ref, sharable): def create_vbd(vm_ref, vdi_ref): - vbd_rec = {'VM': 
vm_ref, 'VDI': vdi_ref} + vbd_rec = { + 'VM': vm_ref, + 'VDI': vdi_ref, + 'currently_attached': False, + } vbd_ref = _create_object('VBD', vbd_rec) after_VBD_create(vbd_ref, vbd_rec) return vbd_ref def after_VBD_create(vbd_ref, vbd_rec): - """Create backref from VM to VBD when VBD is created""" + """Create read-only fields and backref from VM to VBD when VBD is + created.""" + vbd_rec['currently_attached'] = False + vbd_rec['device'] = '' vm_ref = vbd_rec['VM'] vm_rec = _db_content['VM'][vm_ref] vm_rec['VBDs'] = [vbd_ref] @@ -150,9 +160,10 @@ def after_VBD_create(vbd_ref, vbd_rec): vbd_rec['vm_name_label'] = vm_name_label -def create_pbd(config, sr_ref, attached): +def create_pbd(config, host_ref, sr_ref, attached): return _create_object('PBD', { 'device-config': config, + 'host': host_ref, 'SR': sr_ref, 'currently-attached': attached, }) @@ -165,6 +176,33 @@ def create_task(name_label): }) +def create_local_srs(): + """Create an SR that looks like the one created on the local disk by + default by the XenServer installer. Do this one per host.""" + for host_ref in _db_content['host'].keys(): + _create_local_sr(host_ref) + + +def _create_local_sr(host_ref): + sr_ref = _create_object('SR', { + 'name_label': 'Local storage', + 'type': 'lvm', + 'content_type': 'user', + 'shared': False, + 'physical_size': str(1 << 30), + 'physical_utilisation': str(0), + 'virtual_allocation': str(0), + 'other_config': { + 'i18n-original-value-name_label': 'Local storage', + 'i18n-key': 'local-storage', + }, + 'VDIs': [] + }) + pbd_ref = create_pbd('', host_ref, sr_ref, True) + _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] + return sr_ref + + def _create_object(table, obj): ref = str(uuid.uuid4()) obj['uuid'] = str(uuid.uuid4()) @@ -177,9 +215,10 @@ def _create_sr(table, obj): # Forces fake to support iscsi only if sr_type != 'iscsi': raise Failure(['SR_UNKNOWN_DRIVER', sr_type]) + host_ref = _db_content['host'].keys()[0] sr_ref = _create_object(table, obj[2]) vdi_ref = create_vdi('', False, sr_ref, False) - pbd_ref = create_pbd('', sr_ref, True) + pbd_ref = create_pbd('', host_ref, sr_ref, True) _db_content['SR'][sr_ref]['VDIs'] = [vdi_ref] _db_content['SR'][sr_ref]['PBDs'] = [pbd_ref] _db_content['VDI'][vdi_ref]['SR'] = sr_ref @@ -231,6 +270,20 @@ class SessionBase(object): def __init__(self, uri): self._session = None + def VBD_plug(self, _1, ref): + rec = get_record('VBD', ref) + if rec['currently_attached']: + raise Failure(['DEVICE_ALREADY_ATTACHED', ref]) + rec['currently_attached'] = True + rec['device'] = rec['userdevice'] + + def VBD_unplug(self, _1, ref): + rec = get_record('VBD', ref) + if not rec['currently_attached']: + raise Failure(['DEVICE_ALREADY_DETACHED', ref]) + rec['currently_attached'] = False + rec['device'] = '' + def xenapi_request(self, methodname, params): if methodname.startswith('login'): self._login(methodname, params) @@ -242,9 +295,9 @@ class SessionBase(object): full_params = (self._session,) + params meth = getattr(self, methodname, None) if meth is None: - logging.warn('Raising NotImplemented') + LOG.debug(_('Raising NotImplemented')) raise NotImplementedError( - 'xenapi.fake does not have an implementation for %s' % + _('xenapi.fake does not have an implementation for %s') % methodname) return meth(*full_params) @@ -278,15 +331,17 @@ class SessionBase(object): if impl is not None: def callit(*params): - logging.warn('Calling %s %s', name, impl) + LOG.debug(_('Calling %s %s'), name, impl) self._check_session(params) return impl(*params) return callit if 
self._is_gettersetter(name, True): - logging.warn('Calling getter %s', name) + LOG.debug(_('Calling getter %s'), name) return lambda *params: self._getter(name, params) elif self._is_create(name): return lambda *params: self._create(name, params) + elif self._is_destroy(name): + return lambda *params: self._destroy(name, params) else: return None @@ -297,10 +352,16 @@ class SessionBase(object): bits[1].startswith(getter and 'get_' or 'set_')) def _is_create(self, name): + return self._is_method(name, 'create') + + def _is_destroy(self, name): + return self._is_method(name, 'destroy') + + def _is_method(self, name, meth): bits = name.split('.') return (len(bits) == 2 and bits[0] in _CLASSES and - bits[1] == 'create') + bits[1] == meth) def _getter(self, name, params): self._check_session(params) @@ -333,10 +394,10 @@ class SessionBase(object): field in _db_content[cls][ref]): return _db_content[cls][ref][field] - logging.error('Raising NotImplemented') + LOG.debug(_('Raising NotImplemented')) raise NotImplementedError( - 'xenapi.fake does not have an implementation for %s or it has ' - 'been called with the wrong number of arguments' % name) + _('xenapi.fake does not have an implementation for %s or it has ' + 'been called with the wrong number of arguments') % name) def _setter(self, name, params): self._check_session(params) @@ -351,7 +412,7 @@ class SessionBase(object): field in _db_content[cls][ref]): _db_content[cls][ref][field] = val - logging.warn('Raising NotImplemented') + LOG.debug(_('Raising NotImplemented')) raise NotImplementedError( 'xenapi.fake does not have an implementation for %s or it has ' 'been called with the wrong number of arguments or the database ' @@ -368,10 +429,9 @@ class SessionBase(object): _create_sr(cls, params) or _create_object(cls, params[1]) # Call hook to provide any fixups needed (ex. creating backrefs) - try: - globals()["after_%s_create" % cls](ref, params[1]) - except KeyError: - pass + after_hook = 'after_%s_create' % cls + if after_hook in globals(): + globals()[after_hook](ref, params[1]) obj = get_record(cls, ref) @@ -381,6 +441,15 @@ class SessionBase(object): return ref + def _destroy(self, name, params): + self._check_session(params) + self._check_arg_count(params, 2) + table, _ = name.split('.') + ref = params[1] + if ref not in _db_content[table]: + raise Failure(['HANDLE_INVALID', table, ref]) + del _db_content[table][ref] + def _async(self, name, params): task_ref = create_task(name) task = _db_content['task'][task_ref] @@ -399,7 +468,7 @@ class SessionBase(object): self._session not in _db_content['session']): raise Failure(['HANDLE_INVALID', 'session', self._session]) if len(params) == 0 or params[0] != self._session: - logging.warn('Raising NotImplemented') + LOG.debug(_('Raising NotImplemented')) raise NotImplementedError('Call to XenAPI without using .xenapi') def _check_arg_count(self, params, expected): @@ -418,7 +487,7 @@ class SessionBase(object): try: return result[0] except IndexError: - return None + raise Failure(['UUID_INVALID', v, result, recs, k]) return result diff --git a/nova/virt/xenapi/vm_utils.py b/nova/virt/xenapi/vm_utils.py index 9d1b51848973..b80ff4dbadad 100644 --- a/nova/virt/xenapi/vm_utils.py +++ b/nova/virt/xenapi/vm_utils.py @@ -19,14 +19,17 @@ Helper methods for operations related to the management of VM records and their attributes like VDIs, VIFs, as well as their lookup functions.
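With the fake-XenAPI additions above (plug/unplug state on VBDs, a generic destroy path, per-host local SRs), unit tests can drive a realistic attach lifecycle without a XenServer. A sketch of the round trip; create_vm's keyword arguments and the session URI are assumptions based on the surrounding calls:

    from nova.virt.xenapi import fake

    fake.reset()  # re-seeds the 'fake' host and a dom0-style control VM

    vm_ref = fake.create_vm('instance-00000001', 'Running',
                            is_a_template=False, is_control_domain=False)
    vdi_ref = fake.create_vdi('root', False, sr_ref=None, sharable=False)
    vbd_ref = fake.create_vbd(vm_ref, vdi_ref)

    session = fake.SessionBase('test_url')
    session.VBD_plug('fake-session', vbd_ref)  # sets currently_attached
    try:
        session.VBD_plug('fake-session', vbd_ref)
    except fake.Failure:
        pass  # double-plug raises DEVICE_ALREADY_ATTACHED, as on real XenAPI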
""" -import logging +import os import pickle +import re import urllib from xml.dom import minidom from eventlet import event +import glance.client from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.auth.manager import AuthManager from nova.compute import instance_types @@ -37,6 +40,7 @@ from nova.virt.xenapi.volume_utils import StorageError FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.virt.xenapi.vm_utils") XENAPI_POWER_STATE = { 'Halted': power_state.SHUTDOWN, @@ -46,17 +50,23 @@ XENAPI_POWER_STATE = { 'Crashed': power_state.CRASHED} -class ImageType: - """ - Enumeration class for distinguishing different image types - 0 - kernel/ramdisk image (goes on dom0's filesystem) - 1 - disk image (local SR, partitioned by objectstore plugin) - 2 - raw disk image (local SR, NOT partitioned by plugin) - """ +SECTOR_SIZE = 512 +MBR_SIZE_SECTORS = 63 +MBR_SIZE_BYTES = MBR_SIZE_SECTORS * SECTOR_SIZE +KERNEL_DIR = '/boot/guest' - KERNEL_RAMDISK = 0 - DISK = 1 - DISK_RAW = 2 + +class ImageType: + """ + Enumeration class for distinguishing different image types + 0 - kernel/ramdisk image (goes on dom0's filesystem) + 1 - disk image (local SR, partitioned by objectstore plugin) + 2 - raw disk image (local SR, NOT partitioned by plugin) + """ + + KERNEL_RAMDISK = 0 + DISK = 1 + DISK_RAW = 2 class VMHelper(HelperBase): @@ -121,9 +131,9 @@ class VMHelper(HelperBase): rec['HVM_boot_params'] = {'order': 'dc'} rec['platform'] = {'acpi': 'true', 'apic': 'true', 'pae': 'true', 'viridian': 'true'} - logging.debug('Created VM %s...', instance.name) + LOG.debug(_('Created VM %s...'), instance.name) vm_ref = session.call_xenapi('VM.create', rec) - logging.debug(_('Created VM %s as %s.'), instance.name, vm_ref) + LOG.debug(_('Created VM %s as %s.'), instance.name, vm_ref) return vm_ref @classmethod @@ -143,10 +153,9 @@ class VMHelper(HelperBase): vbd_rec['qos_algorithm_type'] = '' vbd_rec['qos_algorithm_params'] = {} vbd_rec['qos_supported_algorithms'] = [] - logging.debug(_('Creating VBD for VM %s, VDI %s ... '), - vm_ref, vdi_ref) + LOG.debug(_('Creating VBD for VM %s, VDI %s ... 
'), vm_ref, vdi_ref) vbd_ref = session.call_xenapi('VBD.create', vbd_rec) - logging.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref, + LOG.debug(_('Created VBD %s for VM %s, VDI %s.'), vbd_ref, vm_ref, vdi_ref) return vbd_ref @@ -161,7 +170,7 @@ class VMHelper(HelperBase): if vbd_rec['userdevice'] == str(number): return vbd except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('VBD not found in instance %s') % vm_ref) @classmethod @@ -170,7 +179,7 @@ class VMHelper(HelperBase): try: vbd_ref = session.call_xenapi('VBD.unplug', vbd_ref) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) if exc.details[0] != 'DEVICE_ALREADY_DETACHED': raise StorageError(_('Unable to unplug VBD %s') % vbd_ref) @@ -183,7 +192,7 @@ class VMHelper(HelperBase): #with Josh Kearney session.wait_for_task(0, task) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to destroy VBD %s') % vbd_ref) @classmethod @@ -199,13 +208,32 @@ class VMHelper(HelperBase): vif_rec['other_config'] = {} vif_rec['qos_algorithm_type'] = '' vif_rec['qos_algorithm_params'] = {} - logging.debug(_('Creating VIF for VM %s, network %s.'), vm_ref, - network_ref) + LOG.debug(_('Creating VIF for VM %s, network %s.'), vm_ref, + network_ref) vif_ref = session.call_xenapi('VIF.create', vif_rec) - logging.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref, - vm_ref, network_ref) + LOG.debug(_('Created VIF %s for VM %s, network %s.'), vif_ref, + vm_ref, network_ref) return vif_ref + @classmethod + def create_vdi(cls, session, sr_ref, name_label, virtual_size, read_only): + """Create a VDI record and returns its reference.""" + vdi_ref = session.get_xenapi().VDI.create( + {'name_label': name_label, + 'name_description': '', + 'SR': sr_ref, + 'virtual_size': str(virtual_size), + 'type': 'User', + 'sharable': False, + 'read_only': read_only, + 'xenstore_data': {}, + 'other_config': {}, + 'sm_config': {}, + 'tags': []}) + LOG.debug(_('Created VDI %s (%s, %s, %s) on %s.'), vdi_ref, + name_label, virtual_size, read_only, sr_ref) + return vdi_ref + @classmethod def create_snapshot(cls, session, instance_id, vm_ref, label): """ Creates Snapshot (Template) VM, Snapshot VBD, Snapshot VDI, @@ -213,8 +241,7 @@ class VMHelper(HelperBase): """ #TODO(sirp): Add quiesce and VSS locking support when Windows support # is added - logging.debug(_("Snapshotting VM %s with label '%s'..."), - vm_ref, label) + LOG.debug(_("Snapshotting VM %s with label '%s'..."), vm_ref, label) vm_vdi_ref, vm_vdi_rec = get_vdi_for_vm_safely(session, vm_ref) vm_vdi_uuid = vm_vdi_rec["uuid"] @@ -227,8 +254,8 @@ class VMHelper(HelperBase): template_vdi_rec = get_vdi_for_vm_safely(session, template_vm_ref)[1] template_vdi_uuid = template_vdi_rec["uuid"] - logging.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, - vm_ref) + LOG.debug(_('Created snapshot %s from VM %s.'), template_vm_ref, + vm_ref) parent_uuid = wait_for_vhd_coalesce( session, instance_id, sr_ref, vm_vdi_ref, original_parent_uuid) @@ -237,15 +264,15 @@ class VMHelper(HelperBase): return template_vm_ref, [template_vdi_uuid, parent_uuid] @classmethod - def upload_image(cls, session, instance_id, vdi_uuids, image_name): + def upload_image(cls, session, instance_id, vdi_uuids, image_id): """ Requests that the Glance plugin bundle the specified VDIs and push them into Glance using the specified human-friendly name. 
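`create_snapshot` and `upload_image` are the two halves of the snapshot workflow that `VMOps.snapshot` (later in this change) drives; roughly, and with error handling elided, the calling sequence is:

    # sketch only: assumes an open session, a running instance's vm_ref,
    # and a Glance image id pre-allocated by the caller
    template_vm_ref, template_vdi_uuids = VMHelper.create_snapshot(
        session, instance.id, vm_ref, label)
    try:
        VMHelper.upload_image(session, instance.id,
                              template_vdi_uuids, image_id)
    finally:
        # the template VM is only scaffolding for the upload; VMOps.snapshot
        # tears it down via its _destroy(..., shutdown=False) helper
        pass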
""" - logging.debug(_("Asking xapi to upload %s as '%s'"), - vdi_uuids, image_name) + logging.debug(_("Asking xapi to upload %s as ID %s"), + vdi_uuids, image_id) params = {'vdi_uuids': vdi_uuids, - 'image_name': image_name, + 'image_id': image_id, 'glance_host': FLAGS.glance_host, 'glance_port': FLAGS.glance_port} @@ -257,15 +284,71 @@ class VMHelper(HelperBase): def fetch_image(cls, session, instance_id, image, user, project, type): """ type is interpreted as an ImageType instance + Related flags: + xenapi_image_service = ['glance', 'objectstore'] + glance_address = 'address for glance services' + glance_port = 'port for glance services' """ - url = images.image_url(image) access = AuthManager().get_access_key(user, project) - logging.debug("Asking xapi to fetch %s as %s", url, access) + + if FLAGS.xenapi_image_service == 'glance': + return cls._fetch_image_glance(session, instance_id, image, + access, type) + else: + return cls._fetch_image_objectstore(session, instance_id, image, + access, user.secret, type) + + @classmethod + def _fetch_image_glance(cls, session, instance_id, image, access, type): + sr = find_sr(session) + if sr is None: + raise exception.NotFound('Cannot find SR to write VDI to') + + c = glance.client.Client(FLAGS.glance_host, FLAGS.glance_port) + + meta, image_file = c.get_image(image) + virtual_size = int(meta['size']) + vdi_size = virtual_size + LOG.debug(_("Size for image %s:%d"), image, virtual_size) + if type == ImageType.DISK: + # Make room for MBR. + vdi_size += MBR_SIZE_BYTES + + vdi = cls.create_vdi(session, sr, _('Glance image %s') % image, + vdi_size, False) + + with_vdi_attached_here(session, vdi, False, + lambda dev: + _stream_disk(dev, type, + virtual_size, image_file)) + if (type == ImageType.KERNEL_RAMDISK): + #we need to invoke a plugin for copying VDI's + #content into proper path + LOG.debug(_("Copying VDI %s to /boot/guest on dom0"), vdi) + fn = "copy_kernel_vdi" + args = {} + args['vdi-ref'] = vdi + #let the plugin copy the correct number of bytes + args['image-size'] = str(vdi_size) + task = session.async_call_plugin('glance', fn, args) + filename = session.wait_for_task(instance_id, task) + #remove the VDI as it is not needed anymore + session.get_xenapi().VDI.destroy(vdi) + LOG.debug(_("Kernel/Ramdisk VDI %s destroyed"), vdi) + return filename + else: + return session.get_xenapi().VDI.get_uuid(vdi) + + @classmethod + def _fetch_image_objectstore(cls, session, instance_id, image, access, + secret, type): + url = images.image_url(image) + LOG.debug(_("Asking xapi to fetch %s as %s"), url, access) fn = (type != ImageType.KERNEL_RAMDISK) and 'get_vdi' or 'get_kernel' args = {} args['src_url'] = url args['username'] = access - args['password'] = user.secret + args['password'] = secret args['add_partition'] = 'false' args['raw'] = 'false' if type != ImageType.KERNEL_RAMDISK: @@ -277,21 +360,45 @@ class VMHelper(HelperBase): return uuid @classmethod - def lookup_image(cls, session, vdi_ref): - logging.debug("Looking up vdi %s for PV kernel", vdi_ref) + def lookup_image(cls, session, instance_id, vdi_ref): + if FLAGS.xenapi_image_service == 'glance': + return cls._lookup_image_glance(session, vdi_ref) + else: + return cls._lookup_image_objectstore(session, instance_id, vdi_ref) + + @classmethod + def _lookup_image_objectstore(cls, session, instance_id, vdi_ref): + LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) fn = "is_vdi_pv" args = {} args['vdi-ref'] = vdi_ref - #TODO: Call proper function in plugin task = 
session.async_call_plugin('objectstore', fn, args) - pv_str = session.wait_for_task(task) + pv_str = session.wait_for_task(instance_id, task) + pv = None if pv_str.lower() == 'true': pv = True elif pv_str.lower() == 'false': pv = False - logging.debug("PV Kernel in VDI:%d", pv) + LOG.debug(_("PV Kernel in VDI:%d"), pv) return pv + @classmethod + def _lookup_image_glance(cls, session, vdi_ref): + LOG.debug(_("Looking up vdi %s for PV kernel"), vdi_ref) + + def is_vdi_pv(dev): + LOG.debug(_("Running pygrub against %s"), dev) + output = os.popen('pygrub -qn /dev/%s' % dev) + for line in output.readlines(): + #try to find kernel string + m = re.search('(?<=kernel:)/.*(?:>)', line) + if m and m.group(0).find('xen') != -1: + LOG.debug(_("Found Xen kernel %s") % m.group(0)) + return True + LOG.debug(_("No Xen kernel found. Booting HVM.")) + return False + return with_vdi_attached_here(session, vdi_ref, True, is_vdi_pv) + @classmethod def lookup(cls, session, i): """Look the instance i up, and returns it if available""" @@ -317,10 +424,9 @@ class VMHelper(HelperBase): vdi = session.get_xenapi().VBD.get_VDI(vbd) # Test valid VDI record = session.get_xenapi().VDI.get_record(vdi) - logging.debug(_('VDI %s is still available'), - record['uuid']) + LOG.debug(_('VDI %s is still available'), record['uuid']) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) else: vdis.append(vdi) if len(vdis) > 0: @@ -331,10 +437,10 @@ class VMHelper(HelperBase): @classmethod def compile_info(cls, record): """Fill record with VM status information""" - logging.info(_("(VM_UTILS) xenserver vm state -> |%s|"), - record['power_state']) - logging.info(_("(VM_UTILS) xenapi power_state -> |%s|"), - XENAPI_POWER_STATE[record['power_state']]) + LOG.info(_("(VM_UTILS) xenserver vm state -> |%s|"), + record['power_state']) + LOG.info(_("(VM_UTILS) xenapi power_state -> |%s|"), + XENAPI_POWER_STATE[record['power_state']]) return {'state': XENAPI_POWER_STATE[record['power_state']], 'max_mem': long(record['memory_static_max']) >> 10, 'mem': long(record['memory_dynamic_max']) >> 10, @@ -360,7 +466,9 @@ class VMHelper(HelperBase): if i >= 3 and i <= 11: ref = node.childNodes # Name and Value - diags[ref[0].firstChild.data] = ref[6].firstChild.data + if len(ref) > 6: + diags[ref[0].firstChild.data] = \ + ref[6].firstChild.data return diags except cls.XenAPI.Failure as e: return {"Unable to retrieve diagnostics": e} @@ -388,11 +496,9 @@ def get_vhd_parent(session, vdi_rec): """ if 'vhd-parent' in vdi_rec['sm_config']: parent_uuid = vdi_rec['sm_config']['vhd-parent'] - #NOTE(sirp): changed xenapi -> get_xenapi() parent_ref = session.get_xenapi().VDI.get_by_uuid(parent_uuid) parent_rec = session.get_xenapi().VDI.get_record(parent_ref) - #NOTE(sirp): changed log -> logging - logging.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref) + LOG.debug(_("VHD %s has parent %s"), vdi_rec['uuid'], parent_ref) return parent_ref, parent_rec else: return None @@ -409,7 +515,7 @@ def get_vhd_parent_uuid(session, vdi_ref): def scan_sr(session, instance_id, sr_ref): - logging.debug(_("Re-scanning SR %s"), sr_ref) + LOG.debug(_("Re-scanning SR %s"), sr_ref) task = session.call_xenapi('Async.SR.scan', sr_ref) session.wait_for_task(instance_id, task) @@ -427,24 +533,29 @@ def wait_for_vhd_coalesce(session, instance_id, sr_ref, vdi_ref, * parent_vhd snapshot """ - #TODO(sirp): we need to timeout this req after a while + max_attempts = FLAGS.xenapi_vhd_coalesce_max_attempts + attempts = {'counter': 0} def _poll_vhds(): + 
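+            # _poll_vhds runs once per LoopingCall tick. The counter lives + # in a dict rather than a plain int because this closure mutates + # it and Python 2 has no 'nonlocal'; LoopingCallDone (raised below) + # is the normal exit, while exceeding max_attempts aborts the wait.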
attempts['counter'] += 1 + if attempts['counter'] > max_attempts: + msg = (_("VHD coalesce attempts exceeded (%d > %d), giving up...") + % (attempts['counter'], max_attempts)) + raise exception.Error(msg) + scan_sr(session, instance_id, sr_ref) parent_uuid = get_vhd_parent_uuid(session, vdi_ref) if original_parent_uuid and (parent_uuid != original_parent_uuid): - logging.debug( - _("Parent %s doesn't match original parent %s, " - "waiting for coalesce..."), - parent_uuid, original_parent_uuid) + LOG.debug(_("Parent %s doesn't match original parent %s, " + "waiting for coalesce..."), parent_uuid, + original_parent_uuid) else: - done.send(parent_uuid) + # Breakout of the loop (normally) and return the parent_uuid + raise utils.LoopingCallDone(parent_uuid) - done = event.Event() loop = utils.LoopingCall(_poll_vhds) loop.start(FLAGS.xenapi_vhd_coalesce_poll_interval, now=True) - parent_uuid = done.wait() - loop.stop() + parent_uuid = loop.wait() return parent_uuid @@ -461,3 +572,123 @@ def get_vdi_for_vm_safely(session, vm_ref): vdi_ref = vdi_refs[0] vdi_rec = session.get_xenapi().VDI.get_record(vdi_ref) return vdi_ref, vdi_rec + + +def find_sr(session): + host = session.get_xenapi_host() + srs = session.get_xenapi().SR.get_all() + for sr in srs: + sr_rec = session.get_xenapi().SR.get_record(sr) + if not ('i18n-key' in sr_rec['other_config'] and + sr_rec['other_config']['i18n-key'] == 'local-storage'): + continue + for pbd in sr_rec['PBDs']: + pbd_rec = session.get_xenapi().PBD.get_record(pbd) + if pbd_rec['host'] == host: + return sr + return None + + +def with_vdi_attached_here(session, vdi, read_only, f): + this_vm_ref = get_this_vm_ref(session) + vbd_rec = {} + vbd_rec['VM'] = this_vm_ref + vbd_rec['VDI'] = vdi + vbd_rec['userdevice'] = 'autodetect' + vbd_rec['bootable'] = False + vbd_rec['mode'] = read_only and 'RO' or 'RW' + vbd_rec['type'] = 'disk' + vbd_rec['unpluggable'] = True + vbd_rec['empty'] = False + vbd_rec['other_config'] = {} + vbd_rec['qos_algorithm_type'] = '' + vbd_rec['qos_algorithm_params'] = {} + vbd_rec['qos_supported_algorithms'] = [] + LOG.debug(_('Creating VBD for VDI %s ... '), vdi) + vbd = session.get_xenapi().VBD.create(vbd_rec) + LOG.debug(_('Creating VBD for VDI %s done.'), vdi) + try: + LOG.debug(_('Plugging VBD %s ... '), vbd) + session.get_xenapi().VBD.plug(vbd) + LOG.debug(_('Plugging VBD %s done.'), vbd) + return f(session.get_xenapi().VBD.get_device(vbd)) + finally: + LOG.debug(_('Destroying VBD for VDI %s ... '), vdi) + vbd_unplug_with_retry(session, vbd) + ignore_failure(session.get_xenapi().VBD.destroy, vbd) + LOG.debug(_('Destroying VBD for VDI %s done.'), vdi) + + +def vbd_unplug_with_retry(session, vbd): + """Call VBD.unplug on the given VBD, with a retry if we get + DEVICE_DETACH_REJECTED. 
For reasons which I don't understand, we're + seeing the device still in use, even when all processes using the device + should be dead.""" + while True: + try: + session.get_xenapi().VBD.unplug(vbd) + LOG.debug(_('VBD.unplug successful first time.')) + return + except VMHelper.XenAPI.Failure, e: + if (len(e.details) > 0 and + e.details[0] == 'DEVICE_DETACH_REJECTED'): + LOG.debug(_('VBD.unplug rejected: retrying...')) + time.sleep(1) + elif (len(e.details) > 0 and + e.details[0] == 'DEVICE_ALREADY_DETACHED'): + LOG.debug(_('VBD.unplug successful eventually.')) + return + else: + LOG.error(_('Ignoring XenAPI.Failure in VBD.unplug: %s'), + e) + return + + +def ignore_failure(func, *args, **kwargs): + try: + return func(*args, **kwargs) + except VMHelper.XenAPI.Failure, e: + LOG.error(_('Ignoring XenAPI.Failure %s'), e) + return None + + +def get_this_vm_uuid(): + with file('/sys/hypervisor/uuid') as f: + return f.readline().strip() + + +def get_this_vm_ref(session): + return session.get_xenapi().VM.get_by_uuid(get_this_vm_uuid()) + + +def _stream_disk(dev, type, virtual_size, image_file): + offset = 0 + if type == ImageType.DISK: + offset = MBR_SIZE_BYTES + _write_partition(virtual_size, dev) + + with open('/dev/%s' % dev, 'wb') as f: + f.seek(offset) + for chunk in image_file: + f.write(chunk) + + +def _write_partition(virtual_size, dev): + dest = '/dev/%s' % dev + mbr_last = MBR_SIZE_SECTORS - 1 + primary_first = MBR_SIZE_SECTORS + primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1 + + LOG.debug(_('Writing partition table %d %d to %s...'), + primary_first, primary_last, dest) + + def execute(cmd, process_input=None, check_exit_code=True): + return utils.execute(cmd=cmd, + process_input=process_input, + check_exit_code=check_exit_code) + + execute('parted --script %s mklabel msdos' % dest) + execute('parted --script %s mkpart primary %ds %ds' % + (dest, primary_first, primary_last)) + + LOG.debug(_('Writing partition table %s done.'), dest) diff --git a/nova/virt/xenapi/vmops.py b/nova/virt/xenapi/vmops.py index b6d6207827c4..6c2fd6a687af 100644 --- a/nova/virt/xenapi/vmops.py +++ b/nova/virt/xenapi/vmops.py @@ -20,10 +20,15 @@ Management class for VM-related functions (spawn, reboot, etc). 
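The constants introduced at the top of vm_utils make `_write_partition`'s arithmetic above concrete: the MBR occupies the first 63 sectors of 512 bytes, and the single primary partition starts immediately after it. Working it through for a hypothetical 1 GiB image:

    SECTOR_SIZE = 512
    MBR_SIZE_SECTORS = 63

    virtual_size = 1 << 30                   # 1 GiB, for illustration
    primary_first = MBR_SIZE_SECTORS         # sector 63
    primary_last = MBR_SIZE_SECTORS + (virtual_size / SECTOR_SIZE) - 1
    # -> 'parted --script <dev> mkpart primary 63s 2097214s', and the
    # backing VDI is created MBR_SIZE_BYTES (63 * 512) larger than the
    # image so the payload still fits behind the partition table.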
""" import json -import logging +import M2Crypto +import os +import subprocess +import tempfile +import uuid from nova import db from nova import context +from nova import log as logging from nova import exception from nova import utils @@ -33,6 +38,9 @@ from nova.virt.xenapi.network_utils import NetworkHelper from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.vm_utils import ImageType +XenAPI = None +LOG = logging.getLogger("nova.virt.xenapi.vmops") + class VMOps(object): """ @@ -77,7 +85,8 @@ class VMOps(object): #Have a look at the VDI and see if it has a PV kernel pv_kernel = False if not instance.kernel_id: - pv_kernel = VMHelper.lookup_image(self._session, vdi_ref) + pv_kernel = VMHelper.lookup_image(self._session, instance.id, + vdi_ref) kernel = None if instance.kernel_id: kernel = VMHelper.fetch_image(self._session, instance.id, @@ -93,10 +102,9 @@ class VMOps(object): if network_ref: VMHelper.create_vif(self._session, vm_ref, network_ref, instance.mac_address) - logging.debug(_('Starting VM %s...'), vm_ref) + LOG.debug(_('Starting VM %s...'), vm_ref) self._session.call_xenapi('VM.start', vm_ref, False, False) - logging.info(_('Spawning VM %s created %s.'), instance.name, - vm_ref) + LOG.info(_('Spawning VM %s created %s.'), instance.name, vm_ref) # NOTE(armando): Do we really need to do this in virt? timer = utils.LoopingCall(f=None) @@ -107,12 +115,12 @@ class VMOps(object): db.instance_set_state(context.get_admin_context(), instance['id'], state) if state == power_state.RUNNING: - logging.debug(_('Instance %s: booted'), instance['name']) + LOG.debug(_('Instance %s: booted'), instance['name']) timer.stop() except Exception, exc: - logging.warn(exc) - logging.exception(_('instance %s: failed to boot'), - instance['name']) + LOG.warn(exc) + LOG.exception(_('instance %s: failed to boot'), + instance['name']) db.instance_set_state(context.get_admin_context(), instance['id'], power_state.SHUTDOWN) @@ -125,21 +133,40 @@ class VMOps(object): """Refactored out the common code of many methods that receive either a vm name or a vm instance, and want a vm instance in return. """ + vm = None try: - instance_name = instance_or_vm.name - vm = VMHelper.lookup(self._session, instance_name) - except AttributeError: - # A vm opaque ref was passed - vm = instance_or_vm + if instance_or_vm.startswith("OpaqueRef:"): + # Got passed an opaque ref; return it + return instance_or_vm + else: + # Must be the instance name + instance_name = instance_or_vm + except (AttributeError, KeyError): + # Note the the KeyError will only happen with fakes.py + # Not a string; must be an ID or a vm instance + if isinstance(instance_or_vm, (int, long)): + ctx = context.get_admin_context() + try: + instance_obj = db.instance_get_by_id(ctx, instance_or_vm) + instance_name = instance_obj.name + except exception.NotFound: + # The unit tests screw this up, as they use an integer for + # the vm name. I'd fix that up, but that's a matter for + # another bug report. 
So for now, just try with the passed + # value + instance_name = instance_or_vm + else: + instance_name = instance_or_vm.name + vm = VMHelper.lookup(self._session, instance_name) if vm is None: raise Exception(_('Instance not present %s') % instance_name) return vm - def snapshot(self, instance, name): + def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance :param instance: instance to be snapshotted - :param name: name/label to be given to the snapshot + :param image_id: id of image to upload to Steps involved in a XenServer snapshot: @@ -175,7 +202,7 @@ class VMOps(object): try: # call plugin to ship snapshot off to glance VMHelper.upload_image( - self._session, instance.id, template_vdi_uuids, name) + self._session, instance.id, template_vdi_uuids, image_id) finally: self._destroy(instance, template_vm_ref, shutdown=False) @@ -187,6 +214,44 @@ class VMOps(object): task = self._session.call_xenapi('Async.VM.clean_reboot', vm) self._session.wait_for_task(instance.id, task) + def set_admin_password(self, instance, new_pass): + """Set the root/admin password on the VM instance. This is done via + an agent running on the VM. Communication between nova and the agent + is done via writing xenstore records. Since communication is done over + the XenAPI RPC calls, we need to encrypt the password. We're using a + simple Diffie-Hellman class instead of the more advanced one in + M2Crypto for compatibility with the agent code. + """ + # Need to uniquely identify this request. + transaction_id = str(uuid.uuid4()) + # The simple Diffie-Hellman class is used to manage key exchange. + dh = SimpleDH() + args = {'id': transaction_id, 'pub': str(dh.get_public())} + resp = self._make_agent_call('key_init', instance, '', args) + if resp is None: + # No response from the agent + return + resp_dict = json.loads(resp) + # Successful return code from key_init is 'D0' + if resp_dict['returncode'] != 'D0': + # There was some sort of error; the message will contain + # a description of the error. 
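+        # (On success, by contrast, 'message' carries the agent's own + # Diffie-Hellman public value as a decimal string, which is why it + # is passed through int() just below.)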
+ raise RuntimeError(resp_dict['message']) + agent_pub = int(resp_dict['message']) + dh.compute_shared(agent_pub) + enc_pass = dh.encrypt(new_pass) + # Send the encrypted password + args['enc_pass'] = enc_pass + resp = self._make_agent_call('password', instance, '', args) + if resp is None: + # No response from the agent + return + resp_dict = json.loads(resp) + # Successful return code from password is '0' + if resp_dict['returncode'] != '0': + raise RuntimeError(resp_dict['message']) + return resp_dict['message'] + def destroy(self, instance): """Destroy VM instance""" vm = VMHelper.lookup(self._session, instance.name) @@ -205,7 +270,7 @@ class VMOps(object): task = self._session.call_xenapi('Async.VM.hard_shutdown', vm) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) # Disk clean-up if vdis: @@ -214,20 +279,20 @@ class VMOps(object): task = self._session.call_xenapi('Async.VDI.destroy', vdi) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) # VM Destroy try: task = self._session.call_xenapi('Async.VM.destroy', vm) self._session.wait_for_task(instance.id, task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) def _wait_with_callback(self, instance_id, task, callback): ret = None try: ret = self._session.wait_for_task(instance_id, task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) callback(ret) def pause(self, instance, callback): @@ -244,30 +309,19 @@ class VMOps(object): def suspend(self, instance, callback): """suspend the specified instance""" - instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise Exception(_("suspend: instance not present %s") % - instance_name) + vm = self._get_vm_opaque_ref(instance) task = self._session.call_xenapi('Async.VM.suspend', vm) - self._wait_with_callback(task, callback) + self._wait_with_callback(instance.id, task, callback) def resume(self, instance, callback): """resume the specified instance""" - instance_name = instance.name - vm = VMHelper.lookup(self._session, instance_name) - if vm is None: - raise Exception(_("resume: instance not present %s") % - instance_name) + vm = self._get_vm_opaque_ref(instance) task = self._session.call_xenapi('Async.VM.resume', vm, False, True) - self._wait_with_callback(task, callback) + self._wait_with_callback(instance.id, task, callback) - def get_info(self, instance_id): + def get_info(self, instance): """Return data about VM instance""" - vm = VMHelper.lookup(self._session, instance_id) - if vm is None: - raise exception.NotFound(_('Instance not' - ' found %s') % instance_id) + vm = self._get_vm_opaque_ref(instance) rec = self._session.get_xenapi().VM.get_record(vm) return VMHelper.compile_info(rec) @@ -282,6 +336,11 @@ class VMOps(object): # TODO: implement this to fix pylint! return 'FAKE CONSOLE OUTPUT of instance' + def get_ajax_console(self, instance): + """Return link to instance's ajax console""" + # TODO: implement this! + return 'http://fakeajaxconsole/fake_url' + def list_from_xenstore(self, vm, path): """Runs the xenstore-ls command to get a listing of all records from 'path' downward. 
Returns a dict with the sub-paths as keys, @@ -326,22 +385,34 @@ class VMOps(object): return self._make_plugin_call('xenstore.py', method=method, vm=vm, path=path, addl_args=addl_args) + def _make_agent_call(self, method, vm, path, addl_args={}): + """Abstracts out the interaction with the agent xenapi plugin.""" + return self._make_plugin_call('agent', method=method, vm=vm, + path=path, addl_args=addl_args) + def _make_plugin_call(self, plugin, method, vm, path, addl_args={}): """Abstracts out the process of calling a method of a xenapi plugin. Any errors raised by the plugin will in turn raise a RuntimeError here. """ + instance_id = vm.id vm = self._get_vm_opaque_ref(vm) rec = self._session.get_xenapi().VM.get_record(vm) args = {'dom_id': rec['domid'], 'path': path} args.update(addl_args) - # If the 'testing_mode' attribute is set, add that to the args. - if getattr(self, 'testing_mode', False): - args['testing_mode'] = 'true' try: task = self._session.async_call_plugin(plugin, method, args) - ret = self._session.wait_for_task(0, task) + ret = self._session.wait_for_task(instance_id, task) except self.XenAPI.Failure, e: - raise RuntimeError("%s" % e.details[-1]) + ret = None + err_trace = e.details[-1] + err_msg = err_trace.splitlines()[-1] + strargs = str(args) + if 'TIMEOUT:' in err_msg: + LOG.error(_('TIMEOUT: The call to %(method)s timed out. ' + 'VM id=%(instance_id)s; args=%(strargs)s') % locals()) + else: + LOG.error(_('The call to %(method)s returned an error: %(e)s. ' + 'VM id=%(instance_id)s; args=%(strargs)s') % locals()) return ret def add_to_xenstore(self, vm, path, key, value): @@ -453,3 +524,89 @@ class VMOps(object): """Removes all data from the xenstore parameter record for this VM.""" self.write_to_param_xenstore(instance_or_vm, {}) ######################################################################## + + +def _runproc(cmd): + pipe = subprocess.PIPE + return subprocess.Popen([cmd], shell=True, stdin=pipe, stdout=pipe, + stderr=pipe, close_fds=True) + + +class SimpleDH(object): + """This class wraps all the functionality needed to implement + basic Diffie-Hellman-Merkle key exchange in Python. It features + intelligent defaults for the prime and base numbers needed for the + calculation, while allowing you to supply your own. It requires that + the openssl binary be installed on the system on which this is run, + as it uses that to handle the encryption and decryption. If openssl + is not available, a RuntimeError will be raised. + """ + def __init__(self, prime=None, base=None, secret=None): + """You can specify the values for prime and base if you wish; + otherwise, reasonable default values will be used. 
+ """ + if prime is None: + self._prime = 162259276829213363391578010288127 + else: + self._prime = prime + if base is None: + self._base = 5 + else: + self._base = base + self._shared = self._public = None + + self._dh = M2Crypto.DH.set_params( + self.dec_to_mpi(self._prime), + self.dec_to_mpi(self._base)) + self._dh.gen_key() + self._public = self.mpi_to_dec(self._dh.pub) + + def get_public(self): + return self._public + + def compute_shared(self, other): + self._shared = self.bin_to_dec( + self._dh.compute_key(self.dec_to_mpi(other))) + return self._shared + + def mpi_to_dec(self, mpi): + bn = M2Crypto.m2.mpi_to_bn(mpi) + hexval = M2Crypto.m2.bn_to_hex(bn) + dec = int(hexval, 16) + return dec + + def bin_to_dec(self, binval): + bn = M2Crypto.m2.bin_to_bn(binval) + hexval = M2Crypto.m2.bn_to_hex(bn) + dec = int(hexval, 16) + return dec + + def dec_to_mpi(self, dec): + bn = M2Crypto.m2.dec_to_bn('%s' % dec) + mpi = M2Crypto.m2.bn_to_mpi(bn) + return mpi + + def _run_ssl(self, text, which): + base_cmd = ('cat %(tmpfile)s | openssl enc -aes-128-cbc ' + '-a -pass pass:%(shared)s -nosalt %(dec_flag)s') + if which.lower()[0] == 'd': + dec_flag = ' -d' + else: + dec_flag = '' + fd, tmpfile = tempfile.mkstemp() + os.close(fd) + file(tmpfile, 'w').write(text) + shared = self._shared + cmd = base_cmd % locals() + proc = _runproc(cmd) + proc.wait() + err = proc.stderr.read() + if err: + raise RuntimeError(_('OpenSSL error: %s') % err) + return proc.stdout.read() + + def encrypt(self, text): + return self._run_ssl(text, 'enc') + + def decrypt(self, text): + return self._run_ssl(text, 'dec') diff --git a/nova/virt/xenapi/volume_utils.py b/nova/virt/xenapi/volume_utils.py index 4bbc41b0323a..0cd15b950f17 100644 --- a/nova/virt/xenapi/volume_utils.py +++ b/nova/virt/xenapi/volume_utils.py @@ -21,16 +21,17 @@ and storage repositories import re import string -import logging from nova import db from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import utils from nova.virt.xenapi import HelperBase FLAGS = flags.FLAGS +LOG = logging.getLogger("nova.virt.xenapi.volume_utils") class StorageError(Exception): @@ -53,7 +54,7 @@ class VolumeHelper(HelperBase): """ sr_ref = session.get_xenapi().SR.get_by_name_label(label) if len(sr_ref) == 0: - logging.debug('Introducing %s...', label) + LOG.debug(_('Introducing %s...'), label) record = {} if 'chapuser' in info and 'chappassword' in info: record = {'target': info['targetHost'], @@ -70,10 +71,10 @@ class VolumeHelper(HelperBase): session.get_xenapi_host(), record, '0', label, description, 'iscsi', '', False, {}) - logging.debug('Introduced %s as %s.', label, sr_ref) + LOG.debug(_('Introduced %s as %s.'), label, sr_ref) return sr_ref except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to create Storage Repository')) else: return sr_ref[0] @@ -85,32 +86,32 @@ class VolumeHelper(HelperBase): vdi_ref = session.get_xenapi().VBD.get_VDI(vbd_ref) sr_ref = session.get_xenapi().VDI.get_SR(vdi_ref) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to find SR from VBD %s') % vbd_ref) return sr_ref @classmethod def destroy_iscsi_storage(cls, session, sr_ref): """Forget the SR whilst preserving the state of the disk""" - logging.debug("Forgetting SR %s ... ", sr_ref) + LOG.debug(_("Forgetting SR %s ... 
"), sr_ref) pbds = [] try: pbds = session.get_xenapi().SR.get_PBDs(sr_ref) except cls.XenAPI.Failure, exc: - logging.warn('Ignoring exception %s when getting PBDs for %s', - exc, sr_ref) + LOG.warn(_('Ignoring exception %s when getting PBDs for %s'), + exc, sr_ref) for pbd in pbds: try: session.get_xenapi().PBD.unplug(pbd) except cls.XenAPI.Failure, exc: - logging.warn('Ignoring exception %s when unplugging PBD %s', - exc, pbd) + LOG.warn(_('Ignoring exception %s when unplugging PBD %s'), + exc, pbd) try: session.get_xenapi().SR.forget(sr_ref) - logging.debug("Forgetting SR %s done.", sr_ref) + LOG.debug(_("Forgetting SR %s done."), sr_ref) except cls.XenAPI.Failure, exc: - logging.warn('Ignoring exception %s when forgetting SR %s', - exc, sr_ref) + LOG.warn(_('Ignoring exception %s when forgetting SR %s'), exc, + sr_ref) @classmethod def introduce_vdi(cls, session, sr_ref): @@ -118,12 +119,12 @@ class VolumeHelper(HelperBase): try: vdis = session.get_xenapi().SR.get_VDIs(sr_ref) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to introduce VDI on SR %s') % sr_ref) try: vdi_rec = session.get_xenapi().VDI.get_record(vdis[0]) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to get record' ' of VDI %s on') % vdis[0]) else: @@ -141,7 +142,7 @@ class VolumeHelper(HelperBase): vdi_rec['xenstore_data'], vdi_rec['sm_config']) except cls.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) raise StorageError(_('Unable to introduce VDI for SR %s') % sr_ref) @@ -165,11 +166,8 @@ class VolumeHelper(HelperBase): target_host = _get_target_host(iscsi_portal) target_port = _get_target_port(iscsi_portal) target_iqn = _get_iqn(iscsi_name, volume_id) - logging.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)', - volume_id, - target_host, - target_port, - target_iqn) + LOG.debug('(vol_id,number,host,port,iqn): (%s,%s,%s,%s)', + volume_id, target_host, target_port, target_iqn) if (device_number < 0) or \ (volume_id is None) or \ (target_host is None) or \ @@ -196,7 +194,7 @@ class VolumeHelper(HelperBase): elif re.match('^[0-9]+$', mountpoint): return string.atoi(mountpoint, 10) else: - logging.warn('Mountpoint cannot be translated: %s', mountpoint) + LOG.warn(_('Mountpoint cannot be translated: %s'), mountpoint) return -1 @@ -257,7 +255,7 @@ def _get_target(volume_id): "sendtargets -p %s" % volume_ref['host']) except exception.ProcessExecutionError, exc: - logging.warn(exc) + LOG.exception(exc) else: targets = r.splitlines() if len(_e) == 0 and len(targets) == 1: diff --git a/nova/virt/xenapi/volumeops.py b/nova/virt/xenapi/volumeops.py index fdeb2506ca0a..189f968c62f3 100644 --- a/nova/virt/xenapi/volumeops.py +++ b/nova/virt/xenapi/volumeops.py @@ -17,14 +17,17 @@ """ Management class for Storage-related functions (attach, detach, etc). 
""" -import logging from nova import exception +from nova import log as logging from nova.virt.xenapi.vm_utils import VMHelper from nova.virt.xenapi.volume_utils import VolumeHelper from nova.virt.xenapi.volume_utils import StorageError +LOG = logging.getLogger("nova.virt.xenapi.volumeops") + + class VolumeOps(object): """ Management class for Volume-related tasks @@ -45,8 +48,8 @@ class VolumeOps(object): raise exception.NotFound(_('Instance %s not found') % instance_name) # NOTE: No Resource Pool concept so far - logging.debug(_("Attach_volume: %s, %s, %s"), - instance_name, device_path, mountpoint) + LOG.debug(_("Attach_volume: %s, %s, %s"), + instance_name, device_path, mountpoint) # Create the iSCSI SR, and the PDB through which hosts access SRs. # But first, retrieve target info, like Host, IQN, LUN and SCSIID vol_rec = VolumeHelper.parse_volume_info(device_path, mountpoint) @@ -61,7 +64,7 @@ class VolumeOps(object): try: vdi_ref = VolumeHelper.introduce_vdi(self._session, sr_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to create VDI on SR %s for instance %s') % (sr_ref, @@ -73,7 +76,7 @@ class VolumeOps(object): vol_rec['deviceNumber'], False) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to use SR %s for instance %s') % (sr_ref, @@ -84,13 +87,13 @@ class VolumeOps(object): vbd_ref) self._session.wait_for_task(vol_rec['deviceNumber'], task) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.exception(exc) VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) raise Exception(_('Unable to attach volume to instance %s') % instance_name) - logging.info(_('Mountpoint %s attached to instance %s'), - mountpoint, instance_name) + LOG.info(_('Mountpoint %s attached to instance %s'), + mountpoint, instance_name) def detach_volume(self, instance_name, mountpoint): """Detach volume storage to VM instance""" @@ -100,13 +103,13 @@ class VolumeOps(object): raise exception.NotFound(_('Instance %s not found') % instance_name) # Detach VBD from VM - logging.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint) + LOG.debug(_("Detach_volume: %s, %s"), instance_name, mountpoint) device_number = VolumeHelper.mountpoint_to_number(mountpoint) try: vbd_ref = VMHelper.find_vbd_by_number(self._session, vm_ref, device_number) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) raise Exception(_('Unable to locate volume %s') % mountpoint) else: try: @@ -114,13 +117,13 @@ class VolumeOps(object): vbd_ref) VMHelper.unplug_vbd(self._session, vbd_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) raise Exception(_('Unable to detach volume %s') % mountpoint) try: VMHelper.destroy_vbd(self._session, vbd_ref) except StorageError, exc: - logging.warn(exc) + LOG.exception(exc) # Forget SR VolumeHelper.destroy_iscsi_storage(self._session, sr_ref) - logging.info(_('Mountpoint %s detached from instance %s'), - mountpoint, instance_name) + LOG.info(_('Mountpoint %s detached from instance %s'), + mountpoint, instance_name) diff --git a/nova/virt/xenapi_conn.py b/nova/virt/xenapi_conn.py index c48f5b7cbfa8..c57c883c97da 100644 --- a/nova/virt/xenapi_conn.py +++ b/nova/virt/xenapi_conn.py @@ -51,8 +51,8 @@ reactor thread if the VM.get_by_name_label or VM.get_record calls block. :iqn_prefix: IQN Prefix, e.g. 
'iqn.2010-10.org.openstack' """ -import logging import sys +import urlparse import xmlrpclib from eventlet import event @@ -62,9 +62,14 @@ from nova import context from nova import db from nova import utils from nova import flags +from nova import log as logging from nova.virt.xenapi.vmops import VMOps from nova.virt.xenapi.volumeops import VolumeOps + +LOG = logging.getLogger("nova.virt.xenapi") + + FLAGS = flags.FLAGS flags.DEFINE_string('xenapi_connection_url', @@ -84,10 +89,17 @@ flags.DEFINE_float('xenapi_task_poll_interval', 'The interval used for polling of remote tasks ' '(Async.VM.start, etc). Used only if ' 'connection_type=xenapi.') +flags.DEFINE_string('xenapi_image_service', + 'glance', + 'Where to get VM images: glance or objectstore.') flags.DEFINE_float('xenapi_vhd_coalesce_poll_interval', 5.0, 'The interval used for polling of coalescing vhds.' ' Used only if connection_type=xenapi.') +flags.DEFINE_integer('xenapi_vhd_coalesce_max_attempts', + 5, + 'Max number of times to poll for VHD to coalesce.' + ' Used only if connection_type=xenapi.') flags.DEFINE_string('target_host', None, 'iSCSI Target Host') @@ -136,14 +148,18 @@ class XenAPIConnection(object): """Create VM instance""" self._vmops.spawn(instance) - def snapshot(self, instance, name): + def snapshot(self, instance, image_id): """ Create snapshot from a running VM instance """ - self._vmops.snapshot(instance, name) + self._vmops.snapshot(instance, image_id) def reboot(self, instance): """Reboot VM instance""" self._vmops.reboot(instance) + def set_admin_password(self, instance, new_pass): + """Set the root/admin password on the VM instance""" + self._vmops.set_admin_password(instance, new_pass) + def destroy(self, instance): """Destroy VM instance""" self._vmops.destroy(instance) @@ -176,6 +192,10 @@ class XenAPIConnection(object): """Return snapshot of console""" return self._vmops.get_console_output(instance) + def get_ajax_console(self, instance): + """Return link to instance's ajax console""" + return self._vmops.get_ajax_console(instance) + def attach_volume(self, instance_name, device_path, mountpoint): """Attach volume storage to VM instance""" return self._volumeops.attach_volume(instance_name, @@ -186,6 +206,12 @@ class XenAPIConnection(object): """Detach volume storage to VM instance""" return self._volumeops.detach_volume(instance_name, mountpoint) + def get_console_pool_info(self, console_type): + xs_url = urlparse.urlparse(FLAGS.xenapi_connection_url) + return {'address': xs_url.netloc, + 'username': FLAGS.xenapi_connection_username, + 'password': FLAGS.xenapi_connection_password} + class XenAPISession(object): """The session to invoke XenAPI SDK calls""" @@ -194,6 +220,7 @@ class XenAPISession(object): self.XenAPI = self.get_imported_xenapi() self._session = self._create_session(url) self._session.login_with_password(user, pw) + self.loop = None def get_imported_xenapi(self): """Stubout point. This can be replaced with a mock xenapi module.""" @@ -230,21 +257,28 @@ class XenAPISession(object): def wait_for_task(self, id, task): """Return the result of the given task. The task is polled - until it completes.""" + until it completes. 
Not re-entrant.  """ done = event.Event() - loop = utils.LoopingCall(self._poll_task, id, task, done) - loop.start(FLAGS.xenapi_task_poll_interval, now=True) + self.loop = utils.LoopingCall(self._poll_task, id, task, done) + self.loop.start(FLAGS.xenapi_task_poll_interval, now=True) rv = done.wait() - loop.stop() + self.loop.stop() return rv + def _stop_loop(self): + """Stop polling for task to finish.""" + #NOTE(sandy-walsh) Had to break this call out to support unit tests. + if self.loop: + self.loop.stop() + def _create_session(self, url): """Stubout point. This can be replaced with a mock session.""" return self.XenAPI.Session(url) def _poll_task(self, id, task, done): """Poll the given XenAPI task, and fire the given action if we - get a result.""" + get a result. + """ try: name = self._session.xenapi.task.get_name_label(task) status = self._session.xenapi.task.get_status(task) @@ -256,7 +290,7 @@ class XenAPISession(object): return elif status == "success": result = self._session.xenapi.task.get_result(task) - logging.info(_("Task [%s] %s status: success %s") % ( + LOG.info(_("Task [%s] %s status: success %s") % ( name, task, result)) @@ -264,7 +298,7 @@ class XenAPISession(object): else: error_info = self._session.xenapi.task.get_error_info(task) action["error"] = str(error_info) - logging.warn(_("Task [%s] %s status: %s %s") % ( + LOG.warn(_("Task [%s] %s status: %s %s") % ( name, task, status, @@ -272,15 +306,16 @@ class XenAPISession(object): done.send_exception(self.XenAPI.Failure(error_info)) db.instance_action_create(context.get_admin_context(), action) except self.XenAPI.Failure, exc: - logging.warn(exc) + LOG.warn(exc) done.send_exception(*sys.exc_info()) + self._stop_loop() def _unwrap_plugin_exceptions(self, func, *args, **kwargs): """Parse exception details""" try: return func(*args, **kwargs) except self.XenAPI.Failure, exc: - logging.debug(_("Got exception: %s"), exc) + LOG.debug(_("Got exception: %s"), exc) if (len(exc.details) == 4 and exc.details[0] == 'XENAPI_PLUGIN_EXCEPTION' and exc.details[2] == 'Failure'): @@ -293,7 +328,7 @@ class XenAPISession(object): else: raise except xmlrpclib.ProtocolError, exc: - logging.debug(_("Got exception: %s"), exc) + LOG.debug(_("Got exception: %s"), exc) raise diff --git a/nova/volume/api.py b/nova/volume/api.py index 2d7fe3762fe3..ce4831cc38b2 100644 --- a/nova/volume/api.py +++ b/nova/volume/api.py @@ -21,11 +21,11 @@ Handles all requests relating to volumes. """ import datetime -import logging from nova import db from nova import exception from nova import flags +from nova import log as logging from nova import quota from nova import rpc from nova.db import base @@ -33,16 +33,18 @@ from nova.db import base FLAGS = flags.FLAGS flags.DECLARE('storage_availability_zone', 'nova.volume.manager') +LOG = logging.getLogger('nova.volume') + class API(base.Base): """API for interacting with the volume manager.""" def create(self, context, size, name, description): if quota.allowed_volumes(context, 1, size) < 1: - logging.warn("Quota exceeeded for %s, tried to create %sG volume", + LOG.warn(_("Quota exceeded for %s, tried to create %sG volume"), context.project_id, size) - raise quota.QuotaError("Volume quota exceeded. You cannot " - "create a volume of size %s" % size) + raise quota.QuotaError(_("Volume quota exceeded.
You cannot " + "create a volume of size %s") % size) options = { 'size': size, diff --git a/nova/volume/driver.py b/nova/volume/driver.py index 8353b9712a3a..5fefa10cf210 100644 --- a/nova/volume/driver.py +++ b/nova/volume/driver.py @@ -20,15 +20,15 @@ Drivers for volumes. """ -import logging -import os import time from nova import exception from nova import flags +from nova import log as logging from nova import utils +LOG = logging.getLogger("nova.volume.driver") FLAGS = flags.FLAGS flags.DEFINE_string('volume_group', 'nova-volumes', 'Name for the VG that will contain exported volumes') @@ -47,8 +47,10 @@ flags.DEFINE_integer('iscsi_num_targets', 'Number of iscsi target ids per host') flags.DEFINE_string('iscsi_target_prefix', 'iqn.2010-10.org.openstack:', 'prefix for iscsi volumes') -flags.DEFINE_string('iscsi_ip_prefix', '127.0', +flags.DEFINE_string('iscsi_ip_prefix', '$my_ip', 'discover volumes on the ip that starts with this prefix') +flags.DEFINE_string('rbd_pool', 'rbd', + 'the rbd pool in which volumes are stored') class VolumeDriver(object): @@ -73,13 +75,15 @@ class VolumeDriver(object): tries = tries + 1 if tries >= FLAGS.num_shell_tries: raise - logging.exception(_("Recovering from a failed execute." - "Try number %s"), tries) + LOG.exception(_("Recovering from a failed execute. " + "Try number %s"), tries) time.sleep(tries ** 2) def check_for_setup_error(self): """Returns an error if prerequisites aren't met""" - if not os.path.isdir("/dev/%s" % FLAGS.volume_group): + out, err = self._execute("sudo vgs --noheadings -o name") + volume_groups = out.split() + if not FLAGS.volume_group in volume_groups: raise exception.Error(_("volume group %s doesn't exist") % FLAGS.volume_group) @@ -205,7 +209,7 @@ class FakeAOEDriver(AOEDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug(_("FAKE AOE: %s"), cmd) + LOG.debug(_("FAKE AOE: %s"), cmd) return (None, None) @@ -281,7 +285,8 @@ class ISCSIDriver(VolumeDriver): self._execute("sudo iscsiadm -m node -T %s -p %s --op update " "-n node.startup -v automatic" % (iscsi_name, iscsi_portal)) - return "/dev/iscsi/%s" % volume['name'] + return "/dev/disk/by-path/ip-%s-iscsi-%s-lun-0" % (iscsi_portal, + iscsi_name) def undiscover_volume(self, volume): """Undiscover volume on a remote host.""" @@ -310,5 +315,109 @@ class FakeISCSIDriver(ISCSIDriver): @staticmethod def fake_execute(cmd, *_args, **_kwargs): """Execute that simply logs the command.""" - logging.debug(_("FAKE ISCSI: %s"), cmd) + LOG.debug(_("FAKE ISCSI: %s"), cmd) return (None, None) + + +class RBDDriver(VolumeDriver): + """Implements RADOS block device (RBD) volume commands""" + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met""" + (stdout, stderr) = self._execute("rados lspools") + pools = stdout.split("\n") + if not FLAGS.rbd_pool in pools: + raise exception.Error(_("rbd has no pool %s") % + FLAGS.rbd_pool) + + def create_volume(self, volume): + """Creates a logical volume.""" + if int(volume['size']) == 0: + size = 100 + else: + size = int(volume['size']) * 1024 + self._try_execute("rbd --pool %s --size %d create %s" % + (FLAGS.rbd_pool, + size, + volume['name'])) + + def delete_volume(self, volume): + """Deletes a logical volume.""" + self._try_execute("rbd --pool %s rm %s" % + (FLAGS.rbd_pool, + volume['name'])) + + def local_path(self, volume): + """Returns the path of the rbd volume.""" + # This is the same as the remote path + # since qemu accesses it directly. 
+ return self.discover_volume(volume) + + def ensure_export(self, context, volume): + """Synchronously recreates an export for a logical volume.""" + pass + + def create_export(self, context, volume): + """Exports the volume""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume""" + pass + + def discover_volume(self, volume): + """Discover volume on a remote host""" + return "rbd:%s/%s" % (FLAGS.rbd_pool, volume['name']) + + def undiscover_volume(self, volume): + """Undiscover volume on a remote host""" + pass + + +class SheepdogDriver(VolumeDriver): + """Executes commands relating to Sheepdog Volumes""" + + def check_for_setup_error(self): + """Returns an error if prerequisites aren't met""" + try: + (out, err) = self._execute("collie cluster info") + if not out.startswith('running'): + raise exception.Error(_("Sheepdog is not working: %s") % out) + except exception.ProcessExecutionError: + raise exception.Error(_("Sheepdog is not working")) + + def create_volume(self, volume): + """Creates a sheepdog volume""" + if int(volume['size']) == 0: + sizestr = '100M' + else: + sizestr = '%sG' % volume['size'] + self._try_execute("qemu-img create sheepdog:%s %s" % + (volume['name'], sizestr)) + + def delete_volume(self, volume): + """Deletes a logical volume""" + self._try_execute("collie vdi delete %s" % volume['name']) + + def local_path(self, volume): + return "sheepdog:%s" % volume['name'] + + def ensure_export(self, context, volume): + """Safely and synchronously recreates an export for a logical volume""" + pass + + def create_export(self, context, volume): + """Exports the volume""" + pass + + def remove_export(self, context, volume): + """Removes an export for a logical volume""" + pass + + def discover_volume(self, volume): + """Discover volume on a remote host""" + return "sheepdog:%s" % volume['name'] + + def undiscover_volume(self, volume): + """Undiscover volume on a remote host""" + pass diff --git a/nova/volume/manager.py b/nova/volume/manager.py index 966334c50dda..6348539c50d4 100644 --- a/nova/volume/manager.py +++ b/nova/volume/manager.py @@ -42,17 +42,18 @@ intact. """ -import logging import datetime from nova import context from nova import exception from nova import flags +from nova import log as logging from nova import manager from nova import utils +LOG = logging.getLogger('nova.volume.manager') FLAGS = flags.FLAGS flags.DEFINE_string('storage_availability_zone', 'nova', @@ -81,7 +82,7 @@ class VolumeManager(manager.Manager): self.driver.check_for_setup_error() ctxt = context.get_admin_context() volumes = self.db.volume_get_all_by_host(ctxt, self.host) - logging.debug(_("Re-exporting %s volumes"), len(volumes)) + LOG.debug(_("Re-exporting %s volumes"), len(volumes)) for volume in volumes: self.driver.ensure_export(ctxt, volume) @@ -89,7 +90,7 @@ class VolumeManager(manager.Manager): """Creates and exports the volume.""" context = context.elevated() volume_ref = self.db.volume_get(context, volume_id) - logging.info(_("volume %s: creating"), volume_ref['name']) + LOG.info(_("volume %s: creating"), volume_ref['name']) self.db.volume_update(context, volume_id, @@ -98,18 +99,18 @@ class VolumeManager(manager.Manager): # before passing it to the driver. 
volume_ref['host'] = self.host - logging.debug(_("volume %s: creating lv of size %sG"), - volume_ref['name'], volume_ref['size']) + LOG.debug(_("volume %s: creating lv of size %sG"), volume_ref['name'], + volume_ref['size']) self.driver.create_volume(volume_ref) - logging.debug(_("volume %s: creating export"), volume_ref['name']) + LOG.debug(_("volume %s: creating export"), volume_ref['name']) self.driver.create_export(context, volume_ref) now = datetime.datetime.utcnow() self.db.volume_update(context, volume_ref['id'], {'status': 'available', 'launched_at': now}) - logging.debug(_("volume %s: created successfully"), volume_ref['name']) + LOG.debug(_("volume %s: created successfully"), volume_ref['name']) return volume_id def delete_volume(self, context, volume_id): @@ -120,12 +121,12 @@ class VolumeManager(manager.Manager): raise exception.Error(_("Volume is still attached")) if volume_ref['host'] != self.host: raise exception.Error(_("Volume is not local to this node")) - logging.debug(_("volume %s: removing export"), volume_ref['name']) + LOG.debug(_("volume %s: removing export"), volume_ref['name']) self.driver.remove_export(context, volume_ref) - logging.debug(_("volume %s: deleting"), volume_ref['name']) + LOG.debug(_("volume %s: deleting"), volume_ref['name']) self.driver.delete_volume(volume_ref) self.db.volume_destroy(context, volume_id) - logging.debug(_("volume %s: deleted successfully"), volume_ref['name']) + LOG.debug(_("volume %s: deleted successfully"), volume_ref['name']) return True def setup_compute_volume(self, context, volume_id): diff --git a/nova/wsgi.py b/nova/wsgi.py index b5d6b96c1592..4f5307d80a2d 100644 --- a/nova/wsgi.py +++ b/nova/wsgi.py @@ -21,8 +21,7 @@ Utility methods for working with WSGI servers """ -import json -import logging +import os import sys from xml.dom import minidom @@ -35,18 +34,37 @@ import webob import webob.dec import webob.exc +from paste import deploy -logging.getLogger("routes.middleware").addHandler(logging.StreamHandler()) +from nova import flags +from nova import log as logging +from nova import utils + + +FLAGS = flags.FLAGS + + +class WritableLogger(object): + """A thin wrapper that responds to `write` and logs.""" + + def __init__(self, logger, level=logging.DEBUG): + self.logger = logger + self.level = level + + def write(self, msg): + self.logger.log(self.level, msg) class Server(object): """Server class to manage multiple WSGI sockets and applications.""" def __init__(self, threads=1000): + logging.basicConfig() self.pool = eventlet.GreenPool(threads) def start(self, application, port, host='0.0.0.0', backlog=128): """Run a WSGI server with the given application.""" + logging.audit(_("Starting %s on %s:%s"), sys.argv[0], host, port) socket = eventlet.listen((host, port), backlog=backlog) self.pool.spawn_n(self._run, application, socket) @@ -59,14 +77,39 @@ class Server(object): def _run(self, application, socket): """Start a WSGI server in a new green thread.""" - eventlet.wsgi.server(socket, application, custom_pool=self.pool) + logger = logging.getLogger('eventlet.wsgi.server') + eventlet.wsgi.server(socket, application, custom_pool=self.pool, + log=WritableLogger(logger)) class Application(object): -# TODO(gundlach): I think we should toss this class, now that it has no -# purpose. """Base WSGI application wrapper. Subclasses need to implement __call__.""" + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files.
+ + Any local configuration (that is, values under the [app:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [app:wadl] + latest_version = 1.3 + paste.app_factory = nova.api.fancy_api:Wadl.factory + + which would result in a call to the `Wadl` class as + + import nova.api.fancy_api + fancy_api.Wadl(latest_version='1.3') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + return cls(**local_config) + def __call__(self, environ, start_response): r"""Subclasses will probably want to implement __call__ like this: @@ -104,20 +147,65 @@ class Application(object): class Middleware(Application): - """ - Base WSGI middleware wrapper. These classes require an application to be + """Base WSGI middleware. + + These classes require an application to be initialized that will be called next. By default the middleware will simply call its wrapped app, or you can override __call__ to customize its behavior. """ - def __init__(self, application): # pylint: disable-msg=W0231 + @classmethod + def factory(cls, global_config, **local_config): + """Used for paste app factories in paste.deploy config files. + + Any local configuration (that is, values under the [filter:APPNAME] + section of the paste config) will be passed into the `__init__` method + as kwargs. + + A hypothetical configuration would look like: + + [filter:analytics] + redis_host = 127.0.0.1 + paste.filter_factory = nova.api.analytics:Analytics.factory + + which would result in a call to the `Analytics` class as + + import nova.api.analytics + analytics.Analytics(app_from_paste, redis_host='127.0.0.1') + + You could of course re-implement the `factory` method in subclasses, + but using the kwarg passing it shouldn't be necessary. + + """ + def _factory(app): + return cls(app, **local_config) + return _factory + + def __init__(self, application): self.application = application + def process_request(self, req): + """Called on each request. + + If this returns None, the next application down the stack will be + executed. If it returns a response then that response will be returned + and execution will stop here. + + """ + return None + + def process_response(self, response): + """Do whatever you'd like to the response.""" + return response + @webob.dec.wsgify - def __call__(self, req): # pylint: disable-msg=W0221 - """Override to implement middleware behavior.""" - return self.application + def __call__(self, req): + response = self.process_request(req) + if response: + return response + response = req.get_response(self.application) + return self.process_response(response) class Debug(Middleware): @@ -303,7 +391,7 @@ class Serializer(object): try: is_xml = (datastring[0] == '<') if not is_xml: - return json.loads(datastring) + return utils.loads(datastring) return self._from_xml(datastring) except: return None @@ -336,7 +424,7 @@ class Serializer(object): return result def _to_json(self, data): - return json.dumps(data) + return utils.dumps(data) def _to_xml(self, data): metadata = self.metadata.get('application/xml', {}) @@ -372,3 +460,64 @@ class Serializer(object): node = doc.createTextNode(str(data)) result.appendChild(node) return result + + +def paste_config_file(basename): + """Find the best location in the system for a paste config file.
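Stepping back to the `Middleware` hooks above: the `process_request`/`process_response` split makes small filters cheap to write without touching `__call__`. A hypothetical subclass (illustrative only, not part of this change):

    class Gatekeeper(Middleware):
        """Illustrative filter built on the new hook methods."""

        def process_request(self, req):
            if req.path_info.startswith('/private'):
                # returning a response short-circuits the wrapped app
                return webob.exc.HTTPForbidden()
            return None    # fall through to self.application

        def process_response(self, response):
            response.headers['X-Gatekeeper'] = 'seen'
            return response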
+ + Search Order + ------------ + + The search for a paste config file honors `FLAGS.state_path`, which in a + version checked out from bzr will be the `nova` directory in the top level + of the checkout, and in an installation for a package for your distribution + will likely point to someplace like /etc/nova. + + This method tries to load places likely to be used in development or + experimentation before falling back to the system-wide configuration + in `/etc/nova/`. + + * Current working directory + * the `etc` directory under state_path, because when working on a checkout + from bzr this will point to the default + * top level of FLAGS.state_path, for distributions + * /etc/nova, which may not be diffrerent from state_path on your distro + + """ + + configfiles = [basename, + os.path.join(FLAGS.state_path, 'etc', basename), + os.path.join(FLAGS.state_path, basename), + '/etc/nova/%s' % basename] + for configfile in configfiles: + if os.path.exists(configfile): + return configfile + + +def load_paste_configuration(filename, appname): + """Returns a paste configuration dict, or None.""" + filename = os.path.abspath(filename) + config = None + try: + config = deploy.appconfig("config:%s" % filename, name=appname) + except LookupError: + pass + return config + + +def load_paste_app(filename, appname): + """Builds a wsgi app from a paste config, None if app not configured.""" + filename = os.path.abspath(filename) + app = None + try: + app = deploy.loadapp("config:%s" % filename, name=appname) + except LookupError: + pass + return app + + +def paste_config_to_flags(config, mixins): + for k, v in mixins.iteritems(): + value = config.get(k, v) + converted_value = FLAGS[k].parser.Parse(value) + setattr(FLAGS, k, converted_value) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent new file mode 100755 index 000000000000..12c3a19c8d0c --- /dev/null +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/agent @@ -0,0 +1,126 @@ +#!/usr/bin/env python + +# Copyright (c) 2011 Citrix Systems, Inc. +# Copyright 2011 OpenStack LLC. +# Copyright 2011 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# +# XenAPI plugin for reading/writing information to xenstore +# + +try: + import json +except ImportError: + import simplejson as json +import os +import random +import subprocess +import tempfile +import time + +import XenAPIPlugin + +from pluginlib_nova import * +configure_logging("xenstore") +import xenstore + +AGENT_TIMEOUT = 30 + + +def jsonify(fnc): + def wrapper(*args, **kwargs): + return json.dumps(fnc(*args, **kwargs)) + return wrapper + + +class TimeoutError(StandardError): + pass + + +@jsonify +def key_init(self, arg_dict): + """Handles the Diffie-Hellman key exchange with the agent to + establish the shared secret key used to encrypt/decrypt sensitive + info to be passed, such as passwords. 
Returns the shared + secret key value. + """ + pub = int(arg_dict["pub"]) + arg_dict["value"] = json.dumps({"name": "keyinit", "value": pub}) + request_id = arg_dict["id"] + arg_dict["path"] = "data/host/%s" % request_id + xenstore.write_record(self, arg_dict) + try: + resp = _wait_for_agent(self, request_id, arg_dict) + except TimeoutError, e: + raise PluginError("%s" % e) + return resp + + +@jsonify +def password(self, arg_dict): + """Writes a request to xenstore that tells the agent to set + the root password for the given VM. The password should be + encrypted using the shared secret key that was returned by a + previous call to key_init. The encrypted password value should + be passed as the value for the 'enc_pass' key in arg_dict. + """ + pub = int(arg_dict["pub"]) + enc_pass = arg_dict["enc_pass"] + arg_dict["value"] = json.dumps({"name": "password", "value": enc_pass}) + request_id = arg_dict["id"] + arg_dict["path"] = "data/host/%s" % request_id + xenstore.write_record(self, arg_dict) + try: + resp = _wait_for_agent(self, request_id, arg_dict) + except TimeoutError, e: + raise PluginError("%s" % e) + return resp + + +def _wait_for_agent(self, request_id, arg_dict): + """Periodically checks xenstore for a response from the agent. + The request is always written to 'data/host/{id}', and + the agent's response for that request will be in 'data/guest/{id}'. + If no value appears from the agent within the time specified by + AGENT_TIMEOUT, the original request is deleted and a TimeoutError + is returned. + """ + arg_dict["path"] = "data/guest/%s" % request_id + arg_dict["ignore_missing_path"] = True + start = time.time() + while True: + if time.time() - start > AGENT_TIMEOUT: + # No response within the timeout period; bail out + # First, delete the request record + arg_dict["path"] = "data/host/%s" % request_id + xenstore.delete_record(self, arg_dict) + raise TimeoutError("TIMEOUT: No response from agent within %s seconds." % + AGENT_TIMEOUT) + ret = xenstore.read_record(self, arg_dict) + # Note: the response for None with be a string that includes + # double quotes. + if ret != '"None"': + # The agent responded + return ret + else: + time.sleep(3) + + +if __name__ == "__main__": + XenAPIPlugin.dispatch( + {"key_init": key_init, + "password": password}) diff --git a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance index 5e648b97023d..aadacce57456 100644 --- a/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance +++ b/plugins/xenserver/xenapi/etc/xapi.d/plugins/glance @@ -18,7 +18,7 @@ # under the License. 
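The `Middleware` base class added to `nova/wsgi.py` earlier in this patch splits request handling into `process_request` and `process_response` hooks, so subclasses no longer override `__call__` directly. A minimal sketch of a filter built on it (the `RequestLogger` class and its header are hypothetical, not part of this patch):

```python
from nova import wsgi


class RequestLogger(wsgi.Middleware):
    """Hypothetical filter: note each request, tag each response."""

    def process_request(self, req):
        print 'handling %s' % req.path
        return None  # None hands the request to the wrapped application

    def process_response(self, response):
        response.headers['X-Handled-By'] = 'RequestLogger'
        return response
```

Wired through paste.deploy, it would use the inherited `factory` classmethod, e.g. `paste.filter_factory = mypkg.logging:RequestLogger.factory` (module path hypothetical).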
# -# XenAPI plugin for putting images into glance +# XenAPI plugin for managing glance images # import base64 @@ -40,29 +40,57 @@ from pluginlib_nova import * configure_logging('glance') CHUNK_SIZE = 8192 +KERNEL_DIR = '/boot/guest' FILE_SR_PATH = '/var/run/sr-mount' +def copy_kernel_vdi(session,args): + vdi = exists(args, 'vdi-ref') + size = exists(args,'image-size') + #Use the uuid as a filename + vdi_uuid=session.xenapi.VDI.get_uuid(vdi) + copy_args={'vdi_uuid':vdi_uuid,'vdi_size':int(size)} + filename=with_vdi_in_dom0(session, vdi, False, + lambda dev: + _copy_kernel_vdi('/dev/%s' % dev,copy_args)) + return filename + +def _copy_kernel_vdi(dest,copy_args): + vdi_uuid=copy_args['vdi_uuid'] + vdi_size=copy_args['vdi_size'] + logging.debug("copying kernel/ramdisk file from %s to /boot/guest/%s",dest,vdi_uuid) + filename=KERNEL_DIR + '/' + vdi_uuid + #read data from /dev/ and write into a file on /boot/guest + of=open(filename,'wb') + f=open(dest,'rb') + #copy only vdi_size bytes + data=f.read(vdi_size) + of.write(data) + f.close() + of.close() + logging.debug("Done. Filename: %s",filename) + return filename + def put_vdis(session, args): params = pickle.loads(exists(args, 'params')) vdi_uuids = params["vdi_uuids"] - image_name = params["image_name"] + image_id = params["image_id"] glance_host = params["glance_host"] glance_port = params["glance_port"] sr_path = get_sr_path(session) #FIXME(sirp): writing to a temp file until Glance supports chunked-PUTs - tmp_file = "%s.tar.gz" % os.path.join('/tmp', image_name) + tmp_file = "%s.tar.gz" % os.path.join('/tmp', str(image_id)) tar_cmd = ['tar', '-zcf', tmp_file, '--directory=%s' % sr_path] paths = [ "%s.vhd" % vdi_uuid for vdi_uuid in vdi_uuids ] tar_cmd.extend(paths) logging.debug("Bundling image with cmd: %s", tar_cmd) subprocess.call(tar_cmd) logging.debug("Writing to test file %s", tmp_file) - put_bundle_in_glance(tmp_file, image_name, glance_host, glance_port) + put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port) return "" # FIXME(sirp): return anything useful here? -def put_bundle_in_glance(tmp_file, image_name, glance_host, glance_port): +def put_bundle_in_glance(tmp_file, image_id, glance_host, glance_port): size = os.path.getsize(tmp_file) basename = os.path.basename(tmp_file) @@ -72,7 +100,6 @@ def put_bundle_in_glance(tmp_file, image_name, glance_host, glance_port): 'x-image-meta-store': 'file', 'x-image-meta-is_public': 'True', 'x-image-meta-type': 'raw', - 'x-image-meta-name': image_name, 'x-image-meta-size': size, 'content-length': size, 'content-type': 'application/octet-stream', @@ -80,7 +107,7 @@ def put_bundle_in_glance(tmp_file, image_name, glance_host, glance_port): conn = httplib.HTTPConnection(glance_host, glance_port) #NOTE(sirp): httplib under python2.4 won't accept a file-like object # to request - conn.putrequest('POST', '/images') + conn.putrequest('PUT', '/images/%s' % image_id) for header, value in headers.iteritems(): conn.putheader(header, value) @@ -129,4 +156,5 @@ def find_sr(session): if __name__ == '__main__': - XenAPIPlugin.dispatch({'put_vdis': put_vdis}) + XenAPIPlugin.dispatch({'put_vdis': put_vdis, + 'copy_kernel_vdi': copy_kernel_vdi}) diff --git a/run_tests.py b/run_tests.py index 5b8617f63dfb..7b5e2192aa81 100644 --- a/run_tests.py +++ b/run_tests.py @@ -17,7 +17,7 @@ # See the License for the specific language governing permissions and # limitations under the License. 
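The glance plugin now uploads to an existing image record: the caller supplies `image_id`, and the bundle is PUT to `/images/<image_id>` rather than POSTed to `/images` with a name header. Reduced to a self-contained sketch (the chunked body-writing loop is an assumption, since the patch elides that part of the function):

```python
import httplib
import os

CHUNK_SIZE = 8192


def put_bundle(tmp_file, image_id, glance_host, glance_port):
    """Push a pre-built .tar.gz of VHDs into an existing glance image."""
    size = os.path.getsize(tmp_file)
    headers = {
        'x-image-meta-store': 'file',
        'x-image-meta-is_public': 'True',
        'x-image-meta-type': 'raw',
        'x-image-meta-size': str(size),
        'content-length': str(size),
        'content-type': 'application/octet-stream',
    }
    conn = httplib.HTTPConnection(glance_host, glance_port)
    # putrequest/putheader instead of request(): python2.4's httplib
    # will not accept a file-like body, so the bundle is sent in chunks.
    conn.putrequest('PUT', '/images/%s' % image_id)
    for header, value in headers.iteritems():
        conn.putheader(header, value)
    conn.endheaders()
    bundle = open(tmp_file, 'rb')
    try:
        chunk = bundle.read(CHUNK_SIZE)
        while chunk:
            conn.send(chunk)
            chunk = bundle.read(CHUNK_SIZE)
    finally:
        bundle.close()
    return conn.getresponse().status
```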
- +import gettext import os import unittest import sys diff --git a/setup.cfg b/setup.cfg index 14dcb5c8edde..9c0a331e35e2 100644 --- a/setup.cfg +++ b/setup.cfg @@ -8,3 +8,17 @@ tag_build = tag_date = 0 tag_svn_revision = 0 +[compile_catalog] +directory = locale +domain = nova + +[update_catalog] +domain = nova +output_dir = locale +input_file = locale/nova.pot + +[extract_messages] +keywords = _ l_ lazy_gettext +mapping_file = babel.cfg +output_file = locale/nova.pot + diff --git a/setup.py b/setup.py index 1abf4d9fe964..3608ff805a32 100644 --- a/setup.py +++ b/setup.py @@ -24,6 +24,15 @@ from setuptools.command.sdist import sdist from sphinx.setup_command import BuildDoc from nova.utils import parse_mailmap, str_dict_replace +from nova import version + +if os.path.isdir('.bzr'): + with open("nova/vcsversion.py", 'w') as version_file: + vcs_cmd = subprocess.Popen(["bzr", "version-info", "--python"], + stdout=subprocess.PIPE) + vcsversion = vcs_cmd.communicate()[0] + version_file.write(vcsversion) + class local_BuildDoc(BuildDoc): def run(self): @@ -48,14 +57,25 @@ class local_sdist(sdist): changelog_file.write(str_dict_replace(changelog, mailmap)) sdist.run(self) +nova_cmdclass= { 'sdist': local_sdist, + 'build_sphinx' : local_BuildDoc } + +try: + from babel.messages import frontend as babel + nova_cmdclass['compile_catalog'] = babel.compile_catalog + nova_cmdclass['extract_messages'] = babel.extract_messages + nova_cmdclass['init_catalog'] = babel.init_catalog + nova_cmdclass['update_catalog'] = babel.update_catalog +except: + pass + setup(name='nova', - version='2011.1', + version=version.canonical_version_string(), description='cloud computing fabric controller', author='OpenStack', author_email='nova@lists.launchpad.net', url='http://www.openstack.org/', - cmdclass={ 'sdist': local_sdist, - 'build_sphinx' : local_BuildDoc }, + cmdclass=nova_cmdclass, packages=find_packages(exclude=['bin', 'smoketests']), include_package_data=True, test_suite='nose.collector', @@ -64,9 +84,11 @@ setup(name='nova', 'bin/nova-dhcpbridge', 'bin/nova-import-canonical-imagestore', 'bin/nova-instancemonitor', + 'bin/nova-logspool', 'bin/nova-manage', 'bin/nova-network', 'bin/nova-objectstore', 'bin/nova-scheduler', + 'bin/nova-spoolsentry', 'bin/nova-volume', 'tools/nova-debug']) diff --git a/smoketests/admin_smoketests.py b/smoketests/admin_smoketests.py index 1ef1c14251ee..86a7f600d37a 100644 --- a/smoketests/admin_smoketests.py +++ b/smoketests/admin_smoketests.py @@ -43,7 +43,7 @@ flags.DEFINE_string('suite', None, 'Specific test suite to run ' + SUITE_NAMES) # TODO(devamcar): Use random tempfile ZIP_FILENAME = '/tmp/nova-me-x509.zip' -TEST_PREFIX = 'test%s' % int(random.random()*1000000) +TEST_PREFIX = 'test%s' % int(random.random() * 1000000) TEST_USERNAME = '%suser' % TEST_PREFIX TEST_PROJECTNAME = '%sproject' % TEST_PREFIX @@ -96,4 +96,3 @@ class UserTests(AdminSmokeTestCase): if __name__ == "__main__": suites = {'user': unittest.makeSuite(UserTests)} sys.exit(base.run_tests(suites)) - diff --git a/smoketests/base.py b/smoketests/base.py index 5a14d3e098af..610270c5cc3a 100644 --- a/smoketests/base.py +++ b/smoketests/base.py @@ -17,6 +17,7 @@ # under the License. 
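setup.py now freezes the output of `bzr version-info --python` into `nova/vcsversion.py`, so sdist tarballs carry their revision without needing bzr at install time. bzr's `--python` format emits a module defining a `version_info` dict, so a consumer such as `nova/version.py` would typically guard the import, roughly like this (a sketch; the fallback values are assumptions, not nova's actual code):

```python
try:
    # Written by setup.py when run inside a bzr checkout.
    from nova.vcsversion import version_info
except ImportError:
    # Building from an exported tarball: no revision data available.
    version_info = {'branch_nick': 'LOCALBRANCH', 'revno': 0}
```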
import boto +import boto_v6 import commands import httplib import os @@ -69,6 +70,17 @@ class SmokeTestCase(unittest.TestCase): 'test.') parts = self.split_clc_url(clc_url) + if FLAGS.use_ipv6: + return boto_v6.connect_ec2(aws_access_key_id=access_key, + aws_secret_access_key=secret_key, + is_secure=parts['is_secure'], + region=RegionInfo(None, + 'nova', + parts['ip']), + port=parts['port'], + path='/services/Cloud', + **kwargs) + return boto.connect_ec2(aws_access_key_id=access_key, aws_secret_access_key=secret_key, is_secure=parts['is_secure'], @@ -115,7 +127,8 @@ class SmokeTestCase(unittest.TestCase): return True def upload_image(self, bucket_name, image): - cmd = 'euca-upload-bundle -b %s -m /tmp/%s.manifest.xml' % (bucket_name, image) + cmd = 'euca-upload-bundle -b ' + cmd += '%s -m /tmp/%s.manifest.xml' % (bucket_name, image) status, output = commands.getstatusoutput(cmd) if status != 0: print '%s -> \n %s' % (cmd, output) @@ -130,6 +143,7 @@ class SmokeTestCase(unittest.TestCase): raise Exception(output) return True + def run_tests(suites): argv = FLAGS(sys.argv) @@ -151,4 +165,3 @@ def run_tests(suites): else: for suite in suites.itervalues(): unittest.TextTestRunner(verbosity=2).run(suite) - diff --git a/smoketests/flags.py b/smoketests/flags.py index ae4d0950850d..35f432a77800 100644 --- a/smoketests/flags.py +++ b/smoketests/flags.py @@ -33,6 +33,7 @@ DEFINE_bool = DEFINE_bool # __GLOBAL FLAGS ONLY__ # Define any app-specific flags in their own files, docs at: # http://code.google.com/p/python-gflags/source/browse/trunk/gflags.py#39 + DEFINE_string('region', 'nova', 'Region to use') DEFINE_string('test_image', 'ami-tiny', 'Image to use for launch tests') - +DEFINE_string('use_ipv6', True, 'use the ipv6 or not') diff --git a/smoketests/public_network_smoketests.py b/smoketests/public_network_smoketests.py new file mode 100644 index 000000000000..bfc2b20ba103 --- /dev/null +++ b/smoketests/public_network_smoketests.py @@ -0,0 +1,180 @@ +# vim: tabstop=4 shiftwidth=4 softtabstop=4 + +# Copyright 2010 United States Government as represented by the +# Administrator of the National Aeronautics and Space Administration. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
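With the boto_v6 import above, `smoketests/base.py` picks a connection module based on `FLAGS.use_ipv6`; the two branches differ only in which module's `connect_ec2` is called. Simplified into one helper (`parts` mirrors the dict returned by the suite's `split_clc_url`):

```python
import boto
import boto_v6
from boto.ec2.regioninfo import RegionInfo


def connect_clc(access_key, secret_key, parts, use_ipv6, **kwargs):
    # boto_v6 is the IPv6-capable variant shipped with the smoketests;
    # both modules expose connect_ec2 with the same signature.
    module = boto_v6 if use_ipv6 else boto
    return module.connect_ec2(
        aws_access_key_id=access_key,
        aws_secret_access_key=secret_key,
        is_secure=parts['is_secure'],
        region=RegionInfo(None, 'nova', parts['ip']),
        port=parts['port'],
        path='/services/Cloud',
        **kwargs)
```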
+ +import commands +import os +import random +import socket +import sys +import time +import unittest + +from smoketests import flags +from smoketests import base +from smoketests import user_smoketests + +#Note that this test should run from +#public network (outside of private network segments) +#Please set EC2_URL correctly +#You should use admin account in this test + +FLAGS = flags.FLAGS + +TEST_PREFIX = 'test%s' % int(random.random() * 1000000) +TEST_BUCKET = '%s_bucket' % TEST_PREFIX +TEST_KEY = '%s_key' % TEST_PREFIX +TEST_KEY2 = '%s_key2' % TEST_PREFIX +TEST_DATA = {} + + +class InstanceTestsFromPublic(user_smoketests.UserSmokeTestCase): + def test_001_can_create_keypair(self): + key = self.create_key_pair(self.conn, TEST_KEY) + self.assertEqual(key.name, TEST_KEY) + + def test_002_security_group(self): + security_group_name = "".join(random.choice("sdiuisudfsdcnpaqwertasd") + for x in range(random.randint(4, 8))) + group = self.conn.create_security_group(security_group_name, + 'test group') + group.connection = self.conn + group.authorize('tcp', 22, 22, '0.0.0.0/0') + if FLAGS.use_ipv6: + group.authorize('tcp', 22, 22, '::/0') + + reservation = self.conn.run_instances(FLAGS.test_image, + key_name=TEST_KEY, + security_groups=[security_group_name], + instance_type='m1.tiny') + self.data['security_group_name'] = security_group_name + self.data['group'] = group + self.data['instance_id'] = reservation.instances[0].id + + def test_003_instance_with_group_runs_within_60_seconds(self): + reservations = self.conn.get_all_instances([self.data['instance_id']]) + instance = reservations[0].instances[0] + # allow 60 seconds to exit pending with IP + for x in xrange(60): + instance.update() + if instance.state == u'running': + break + time.sleep(1) + else: + self.fail('instance failed to start') + ip = reservations[0].instances[0].private_dns_name + self.failIf(ip == '0.0.0.0') + self.data['private_ip'] = ip + if FLAGS.use_ipv6: + ipv6 = reservations[0].instances[0].dns_name_v6 + self.failIf(ipv6 is None) + self.data['ip_v6'] = ipv6 + + def test_004_can_ssh_to_ipv6(self): + if FLAGS.use_ipv6: + for x in xrange(20): + try: + conn = self.connect_ssh( + self.data['ip_v6'], TEST_KEY) + conn.close() + except Exception as ex: + print ex + time.sleep(1) + else: + break + else: + self.fail('could not ssh to instance') + + def test_012_can_create_instance_with_keypair(self): + if 'instance_id' in self.data: + self.conn.terminate_instances([self.data['instance_id']]) + reservation = self.conn.run_instances(FLAGS.test_image, + key_name=TEST_KEY, + instance_type='m1.tiny') + self.assertEqual(len(reservation.instances), 1) + self.data['instance_id'] = reservation.instances[0].id + + def test_013_instance_runs_within_60_seconds(self): + reservations = self.conn.get_all_instances([self.data['instance_id']]) + instance = reservations[0].instances[0] + # allow 60 seconds to exit pending with IP + for x in xrange(60): + instance.update() + if instance.state == u'running': + break + time.sleep(1) + else: + self.fail('instance failed to start') + ip = reservations[0].instances[0].private_dns_name + self.failIf(ip == '0.0.0.0') + self.data['private_ip'] = ip + if FLAGS.use_ipv6: + ipv6 = reservations[0].instances[0].dns_name_v6 + self.failIf(ipv6 is None) + self.data['ip_v6'] = ipv6 + + def test_014_can_not_ping_private_ip(self): + for x in xrange(4): + # ping waits for 1 second + status, output = commands.getstatusoutput( + 'ping -c1 %s' % self.data['private_ip']) + if status == 0: + self.fail('can ping private 
ip from public network') + if FLAGS.use_ipv6: + status, output = commands.getstatusoutput( + 'ping6 -c1 %s' % self.data['ip_v6']) + if status == 0: + self.fail('can ping ipv6 from public network') + else: + pass + + def test_015_can_not_ssh_to_private_ip(self): + for x in xrange(1): + try: + conn = self.connect_ssh(self.data['private_ip'], TEST_KEY) + conn.close() + except Exception: + time.sleep(1) + else: + self.fail('can ssh for ipv4 address from public network') + + if FLAGS.use_ipv6: + for x in xrange(1): + try: + conn = self.connect_ssh( + self.data['ip_v6'], TEST_KEY) + conn.close() + except Exception: + time.sleep(1) + else: + self.fail('can ssh for ipv6 address from public network') + + def test_999_tearDown(self): + self.delete_key_pair(self.conn, TEST_KEY) + security_group_name = self.data['security_group_name'] + group = self.data['group'] + if group: + group.revoke('tcp', 22, 22, '0.0.0.0/0') + if FLAGS.use_ipv6: + group.revoke('tcp', 22, 22, '::/0') + self.conn.delete_security_group(security_group_name) + if 'instance_id' in self.data: + self.conn.terminate_instances([self.data['instance_id']]) + +if __name__ == "__main__": + suites = {'instance': unittest.makeSuite(InstanceTestsFromPublic)} + sys.exit(base.run_tests(suites)) diff --git a/smoketests/user_smoketests.py b/smoketests/user_smoketests.py index 578c0722ee8c..d5a3a7556f2f 100644 --- a/smoketests/user_smoketests.py +++ b/smoketests/user_smoketests.py @@ -45,7 +45,7 @@ flags.DEFINE_string('bundle_kernel', 'openwrt-x86-vmlinuz', flags.DEFINE_string('bundle_image', 'openwrt-x86-ext2.image', 'Local image file to use for bundling tests') -TEST_PREFIX = 'test%s' % int (random.random()*1000000) +TEST_PREFIX = 'test%s' % int(random.random() * 1000000) TEST_BUCKET = '%s_bucket' % TEST_PREFIX TEST_KEY = '%s_key' % TEST_PREFIX TEST_GROUP = '%s_group' % TEST_PREFIX @@ -80,7 +80,7 @@ class ImageTests(UserSmokeTestCase): def test_006_can_register_kernel(self): kernel_id = self.conn.register_image('%s/%s.manifest.xml' % - (TEST_BUCKET, FLAGS.bundle_kernel)) + (TEST_BUCKET, FLAGS.bundle_kernel)) self.assert_(kernel_id is not None) self.data['kernel_id'] = kernel_id @@ -92,7 +92,7 @@ class ImageTests(UserSmokeTestCase): time.sleep(1) else: print image.state - self.assert_(False) # wasn't available within 10 seconds + self.assert_(False) # wasn't available within 10 seconds self.assert_(image.type == 'machine') for i in xrange(10): @@ -101,7 +101,7 @@ class ImageTests(UserSmokeTestCase): break time.sleep(1) else: - self.assert_(False) # wasn't available within 10 seconds + self.assert_(False) # wasn't available within 10 seconds self.assert_(kernel.type == 'kernel') def test_008_can_describe_image_attribute(self): @@ -152,14 +152,17 @@ class InstanceTests(UserSmokeTestCase): for x in xrange(60): instance.update() if instance.state == u'running': - break + break time.sleep(1) else: self.fail('instance failed to start') ip = reservations[0].instances[0].private_dns_name self.failIf(ip == '0.0.0.0') self.data['private_ip'] = ip - print self.data['private_ip'] + if FLAGS.use_ipv6: + ipv6 = reservations[0].instances[0].dns_name_v6 + self.failIf(ipv6 is None) + self.data['ip_v6'] = ipv6 def test_004_can_ping_private_ip(self): for x in xrange(120): @@ -171,6 +174,16 @@ class InstanceTests(UserSmokeTestCase): else: self.fail('could not ping instance') + if FLAGS.use_ipv6: + for x in xrange(120): + # ping waits for 1 second + status, output = commands.getstatusoutput( + 'ping6 -c1 %s' % self.data['ip_v6']) + if status == 0: + break + else: + 
self.fail('could not ping instance') + def test_005_can_ssh_to_private_ip(self): for x in xrange(30): try: @@ -183,6 +196,19 @@ class InstanceTests(UserSmokeTestCase): else: self.fail('could not ssh to instance') + if FLAGS.use_ipv6: + for x in xrange(30): + try: + conn = self.connect_ssh( + self.data['ip_v6'], TEST_KEY) + conn.close() + except Exception: + time.sleep(1) + else: + break + else: + self.fail('could not ssh to instance v6') + def test_006_can_allocate_elastic_ip(self): result = self.conn.allocate_address() self.assertTrue(hasattr(result, 'public_ip')) @@ -388,7 +414,6 @@ class SecurityGroupTests(UserSmokeTestCase): raise Exception("Timeout") time.sleep(1) - def test_999_tearDown(self): self.conn.delete_key_pair(TEST_KEY) self.conn.delete_security_group(TEST_GROUP) diff --git a/tools/ajaxterm/README.txt b/tools/ajaxterm/README.txt new file mode 100644 index 000000000000..4b0ae99afa2e --- /dev/null +++ b/tools/ajaxterm/README.txt @@ -0,0 +1,120 @@ += [http://antony.lesuisse.org/qweb/trac/wiki/AjaxTerm Ajaxterm] = + +Ajaxterm is a web based terminal. It was totally inspired and works almost +exactly like http://anyterm.org/ except it's much easier to install (see +comparaison with anyterm below). + +Ajaxterm written in python (and some AJAX javascript for client side) and depends only on python2.3 or better.[[BR]] +Ajaxterm is '''very simple to install''' on Linux, MacOS X, FreeBSD, Solaris, cygwin and any Unix that runs python2.3.[[BR]] +Ajaxterm was written by Antony Lesuisse (email: al AT udev.org), License Public Domain. + +Use the [/qweb/forum/viewforum.php?id=2 Forum], if you have any question or remark. + +== News == + + * 2006-10-29: v0.10 allow space in login, cgi launch fix, redhat init + * 2006-07-12: v0.9 change uid, daemon fix (Daniel Fischer) + * 2006-07-04: v0.8 add login support to ssh (Sven Geggus), change max width to 256 + * 2006-05-31: v0.7 minor fixes, daemon option + * 2006-05-23: v0.6 Applied debian and gentoo patches, renamed to Ajaxterm, default port 8022 + +== Download and Install == + + * Release: [/qweb/files/Ajaxterm-0.10.tar.gz Ajaxterm-0.10.tar.gz] + * Browse src: [/qweb/trac/browser/trunk/ajaxterm/ ajaxterm/] + +To install Ajaxterm issue the following commands: +{{{ +wget http://antony.lesuisse.org/qweb/files/Ajaxterm-0.10.tar.gz +tar zxvf Ajaxterm-0.10.tar.gz +cd Ajaxterm-0.10 +./ajaxterm.py +}}} +Then point your browser to this URL : http://localhost:8022/ + +== Screenshot == + +{{{ +#!html +
[image: ajaxterm screenshot]
+}}} + +== Documentation and Caveats == + + * Ajaxterm only support latin1, if you use Ubuntu or any LANG==en_US.UTF-8 distribution don't forget to "unset LANG". + + * If run as root ajaxterm will run /bin/login, otherwise it will run ssh + localhost. To use an other command use the -c option. + + * By default Ajaxterm only listen at 127.0.0.1:8022. For remote access, it is + strongly recommended to use '''https SSL/TLS''', and that is simple to + configure if you use the apache web server using mod_proxy.[[BR]][[BR]] + Using ssl will also speed up ajaxterm (probably because of keepalive).[[BR]][[BR]] + Here is an configuration example: + +{{{ + Listen 443 + NameVirtualHost *:443 + + + ServerName localhost + SSLEngine On + SSLCertificateKeyFile ssl/apache.pem + SSLCertificateFile ssl/apache.pem + + ProxyRequests Off + + Order deny,allow + Allow from all + + ProxyPass /ajaxterm/ http://localhost:8022/ + ProxyPassReverse /ajaxterm/ http://localhost:8022/ + +}}} + + * Using GET HTTP request seems to speed up ajaxterm, just click on GET in the + interface, but be warned that your keystrokes might be loggued (by apache or + any proxy). I usually enable it after the login. + + * Ajaxterm commandline usage: + +{{{ +usage: ajaxterm.py [options] + +options: + -h, --help show this help message and exit + -pPORT, --port=PORT Set the TCP port (default: 8022) + -cCMD, --command=CMD set the command (default: /bin/login or ssh localhost) + -l, --log log requests to stderr (default: quiet mode) + -d, --daemon run as daemon in the background + -PPIDFILE, --pidfile=PIDFILE + set the pidfile (default: /var/run/ajaxterm.pid) + -iINDEX_FILE, --index=INDEX_FILE + default index file (default: ajaxterm.html) + -uUID, --uid=UID Set the daemon's user id +}}} + + * Ajaxterm was first written as a demo for qweb (my web framework), but + actually doesn't use many features of qweb. + + * Compared to anyterm: + * There are no partial updates, ajaxterm updates either all the screen or + nothing. That make the code simpler and I also think it's faster. HTTP + replies are always gzencoded. When used in 80x25 mode, almost all of + them are below the 1500 bytes (size of an ethernet frame) and we just + replace the screen with the reply (no javascript string handling). + * Ajaxterm polls the server for updates with an exponentially growing + timeout when the screen hasn't changed. The timeout is also resetted as + soon as a key is pressed. Anyterm blocks on a pending request and use a + parallel connection for keypresses. The anyterm approch is better + when there aren't any keypress. + + * Ajaxterm files are released in the Public Domain, (except [http://sarissa.sourceforge.net/doc/ sarissa*] which are LGPL). + +== TODO == + + * insert mode ESC [ 4 h + * change size x,y from gui (sending signal) + * vt102 graphic codepage + * use innerHTML or prototype instead of sarissa + diff --git a/tools/ajaxterm/ajaxterm.1 b/tools/ajaxterm/ajaxterm.1 new file mode 100644 index 000000000000..46f2acb3393d --- /dev/null +++ b/tools/ajaxterm/ajaxterm.1 @@ -0,0 +1,35 @@ +.TH ajaxterm "1" "May 2006" "ajaxterm 0.5" "User commands" +.SH NAME +ajaxterm \- Web based terminal written in python + +.SH DESCRITPION +\fBajaxterm\fR is a web based terminal written in python and some AJAX +javascript for client side. +It can use almost any web browser and even works through firewalls. + +.SH USAGE +\fBajaxterm.py\fR [options] + +.SH OPTIONS +A summary of the options supported by \fBajaxterm\fR is included below. 
+ \fB-h, --help\fR show this help message and exit + \fB-pPORT, --port=PORT\fR Set the TCP port (default: 8022) + \fB-cCMD, --command=CMD\fR set the command (default: /bin/login or ssh localhost) + \fB-l, --log\fR log requests to stderr (default: quiet mode) + +.SH AUTHOR +Antony Lesuisse + +This manual page was written for the Debian system by +Julien Valroff (but may be used by others). + +.SH "REPORTING BUGS" +Report any bugs to the author: Antony Lesuisse + +.SH COPYRIGHT +Copyright Antony Lesuisse + +.SH SEE ALSO +- \fBajaxterm\fR wiki page: http://antony.lesuisse.org/qweb/trac/wiki/AjaxTerm +.br +- \fBajaxterm\fR forum: http://antony.lesuisse.org/qweb/forum/viewforum.php?id=2 diff --git a/tools/ajaxterm/ajaxterm.css b/tools/ajaxterm/ajaxterm.css new file mode 100644 index 000000000000..b9a5f877168e --- /dev/null +++ b/tools/ajaxterm/ajaxterm.css @@ -0,0 +1,64 @@ +pre.stat { + margin: 0px; + padding: 4px; + display: block; + font-family: monospace; + white-space: pre; + background-color: black; + border-top: 1px solid black; + color: white; +} +pre.stat span { + padding: 0px; +} +pre.stat .on { + background-color: #080; + font-weight: bold; + color: white; + cursor: pointer; +} +pre.stat .off { + background-color: #888; + font-weight: bold; + color: white; + cursor: pointer; +} +pre.term { + margin: 0px; + padding: 4px; + display: block; + font-family: monospace; + white-space: pre; + background-color: black; + border-top: 1px solid white; + color: #eee; +} +pre.term span.f0 { color: #000; } +pre.term span.f1 { color: #b00; } +pre.term span.f2 { color: #0b0; } +pre.term span.f3 { color: #bb0; } +pre.term span.f4 { color: #00b; } +pre.term span.f5 { color: #b0b; } +pre.term span.f6 { color: #0bb; } +pre.term span.f7 { color: #bbb; } +pre.term span.f8 { color: #666; } +pre.term span.f9 { color: #f00; } +pre.term span.f10 { color: #0f0; } +pre.term span.f11 { color: #ff0; } +pre.term span.f12 { color: #00f; } +pre.term span.f13 { color: #f0f; } +pre.term span.f14 { color: #0ff; } +pre.term span.f15 { color: #fff; } +pre.term span.b0 { background-color: #000; } +pre.term span.b1 { background-color: #b00; } +pre.term span.b2 { background-color: #0b0; } +pre.term span.b3 { background-color: #bb0; } +pre.term span.b4 { background-color: #00b; } +pre.term span.b5 { background-color: #b0b; } +pre.term span.b6 { background-color: #0bb; } +pre.term span.b7 { background-color: #bbb; } + +body { background-color: #888; } +#term { + float: left; +} diff --git a/tools/ajaxterm/ajaxterm.html b/tools/ajaxterm/ajaxterm.html new file mode 100644 index 000000000000..7fdef5e9415d --- /dev/null +++ b/tools/ajaxterm/ajaxterm.html @@ -0,0 +1,25 @@ + + + + Ajaxterm + + + + + + + + +
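The README above describes the update strategy: poll the server, grow the timeout exponentially while the screen is unchanged, and snap back to fast polling on activity. In `ajaxterm.js` the delay starts at 100ms and is capped at 2s; the policy in isolation looks like this:

```python
class PollBackoff(object):
    """Sketch of ajaxterm.js's polling backoff (100ms floor, 2s ceiling)."""

    def __init__(self, floor_ms=100, ceiling_ms=2000):
        self.floor = floor_ms
        self.ceiling = ceiling_ms
        self.delay = floor_ms

    def on_screen_changed(self):
        # Activity (or a keypress): return to fast polling.
        self.delay = self.floor
        return self.delay

    def on_screen_unchanged(self):
        # Idle: double the delay, but never beyond the ceiling.
        self.delay = min(self.delay * 2, self.ceiling)
        return self.delay
```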
+ + diff --git a/tools/ajaxterm/ajaxterm.js b/tools/ajaxterm/ajaxterm.js new file mode 100644 index 000000000000..32b401930c44 --- /dev/null +++ b/tools/ajaxterm/ajaxterm.js @@ -0,0 +1,279 @@ +ajaxterm={}; +ajaxterm.Terminal_ctor=function(id,width,height) { + var ie=0; + if(window.ActiveXObject) + ie=1; + var sid=""+SESSION_ID; + var query0="s="+sid+"&w="+width+"&h="+height; + var query1=query0+"&c=1&k="; + var buf=""; + var timeout; + var error_timeout; + var keybuf=[]; + var sending=0; + var rmax=1; + + var div=document.getElementById(id); + var dstat=document.createElement('pre'); + var sled=document.createElement('span'); + var opt_get=document.createElement('a'); + var opt_color=document.createElement('a'); + var opt_paste=document.createElement('a'); + var sdebug=document.createElement('span'); + var dterm=document.createElement('div'); + + function debug(s) { + sdebug.innerHTML=s; + } + function error() { + sled.className='off'; + debug("Connection lost timeout ts:"+((new Date).getTime())); + } + function opt_add(opt,name) { + opt.className='off'; + opt.innerHTML=' '+name+' '; + dstat.appendChild(opt); + dstat.appendChild(document.createTextNode(' ')); + } + function do_get(event) { + opt_get.className=(opt_get.className=='off')?'on':'off'; + debug('GET '+opt_get.className); + } + function do_color(event) { + var o=opt_color.className=(opt_color.className=='off')?'on':'off'; + if(o=='on') + query1=query0+"&c=1&k="; + else + query1=query0+"&k="; + debug('Color '+opt_color.className); + } + function mozilla_clipboard() { + // mozilla sucks + try { + netscape.security.PrivilegeManager.enablePrivilege("UniversalXPConnect"); + } catch (err) { + debug('Access denied, more info'); + return undefined; + } + var clip = Components.classes["@mozilla.org/widget/clipboard;1"].createInstance(Components.interfaces.nsIClipboard); + var trans = Components.classes["@mozilla.org/widget/transferable;1"].createInstance(Components.interfaces.nsITransferable); + if (!clip || !trans) { + return undefined; + } + trans.addDataFlavor("text/unicode"); + clip.getData(trans,clip.kGlobalClipboard); + var str=new Object(); + var strLength=new Object(); + try { + trans.getTransferData("text/unicode",str,strLength); + } catch(err) { + return ""; + } + if (str) { + str=str.value.QueryInterface(Components.interfaces.nsISupportsString); + } + if (str) { + return str.data.substring(0,strLength.value / 2); + } else { + return ""; + } + } + function do_paste(event) { + var p=undefined; + if (window.clipboardData) { + p=window.clipboardData.getData("Text"); + } else if(window.netscape) { + p=mozilla_clipboard(); + } + if (p) { + debug('Pasted'); + queue(encodeURIComponent(p)); + } else { + } + } + function update() { +// debug("ts: "+((new Date).getTime())+" rmax:"+rmax); + if(sending==0) { + sending=1; + sled.className='on'; + var r=new XMLHttpRequest(); + var send=""; + while(keybuf.length>0) { + send+=keybuf.pop(); + } + var query=query1+send; + if(opt_get.className=='on') { + r.open("GET","u?"+query,true); + if(ie) { + r.setRequestHeader("If-Modified-Since", "Sat, 1 Jan 2000 00:00:00 GMT"); + } + } else { + r.open("POST","u",true); + } + r.setRequestHeader('Content-Type','application/x-www-form-urlencoded'); + r.onreadystatechange = function () { +// debug("xhr:"+((new Date).getTime())+" state:"+r.readyState+" status:"+r.status+" statusText:"+r.statusText); + if (r.readyState==4) { + if(r.status==200) { + window.clearTimeout(error_timeout); + de=r.responseXML.documentElement; + if(de.tagName=="pre") { + if(ie) { + 
Sarissa.updateContentFromNode(de, dterm); + } else { + Sarissa.updateContentFromNode(de, dterm); +// old=div.firstChild; +// div.replaceChild(de,old); + } + rmax=100; + } else { + rmax*=2; + if(rmax>2000) + rmax=2000; + } + sending=0; + sled.className='off'; + timeout=window.setTimeout(update,rmax); + } else { + debug("Connection error status:"+r.status); + } + } + } + error_timeout=window.setTimeout(error,5000); + if(opt_get.className=='on') { + r.send(null); + } else { + r.send(query); + } + } + } + function queue(s) { + keybuf.unshift(s); + if(sending==0) { + window.clearTimeout(timeout); + timeout=window.setTimeout(update,1); + } + } + function keypress(ev) { + if (!ev) var ev=window.event; +// s="kp keyCode="+ev.keyCode+" which="+ev.which+" shiftKey="+ev.shiftKey+" ctrlKey="+ev.ctrlKey+" altKey="+ev.altKey; +// debug(s); +// return false; +// else { if (!ev.ctrlKey || ev.keyCode==17) { return; } + var kc; + var k=""; + if (ev.keyCode) + kc=ev.keyCode; + if (ev.which) + kc=ev.which; + if (ev.altKey) { + if (kc>=65 && kc<=90) + kc+=32; + if (kc>=97 && kc<=122) { + k=String.fromCharCode(27)+String.fromCharCode(kc); + } + } else if (ev.ctrlKey) { + if (kc>=65 && kc<=90) k=String.fromCharCode(kc-64); // Ctrl-A..Z + else if (kc>=97 && kc<=122) k=String.fromCharCode(kc-96); // Ctrl-A..Z + else if (kc==54) k=String.fromCharCode(30); // Ctrl-^ + else if (kc==109) k=String.fromCharCode(31); // Ctrl-_ + else if (kc==219) k=String.fromCharCode(27); // Ctrl-[ + else if (kc==220) k=String.fromCharCode(28); // Ctrl-\ + else if (kc==221) k=String.fromCharCode(29); // Ctrl-] + else if (kc==219) k=String.fromCharCode(29); // Ctrl-] + else if (kc==219) k=String.fromCharCode(0); // Ctrl-@ + } else if (ev.which==0) { + if (kc==9) k=String.fromCharCode(9); // Tab + else if (kc==8) k=String.fromCharCode(127); // Backspace + else if (kc==27) k=String.fromCharCode(27); // Escape + else { + if (kc==33) k="[5~"; // PgUp + else if (kc==34) k="[6~"; // PgDn + else if (kc==35) k="[4~"; // End + else if (kc==36) k="[1~"; // Home + else if (kc==37) k="[D"; // Left + else if (kc==38) k="[A"; // Up + else if (kc==39) k="[C"; // Right + else if (kc==40) k="[B"; // Down + else if (kc==45) k="[2~"; // Ins + else if (kc==46) k="[3~"; // Del + else if (kc==112) k="[[A"; // F1 + else if (kc==113) k="[[B"; // F2 + else if (kc==114) k="[[C"; // F3 + else if (kc==115) k="[[D"; // F4 + else if (kc==116) k="[[E"; // F5 + else if (kc==117) k="[17~"; // F6 + else if (kc==118) k="[18~"; // F7 + else if (kc==119) k="[19~"; // F8 + else if (kc==120) k="[20~"; // F9 + else if (kc==121) k="[21~"; // F10 + else if (kc==122) k="[23~"; // F11 + else if (kc==123) k="[24~"; // F12 + if (k.length) { + k=String.fromCharCode(27)+k; + } + } + } else { + if (kc==8) + k=String.fromCharCode(127); // Backspace + else + k=String.fromCharCode(kc); + } + if(k.length) { +// queue(encodeURIComponent(k)); + if(k=="+") { + queue("%2B"); + } else { + queue(escape(k)); + } + } + ev.cancelBubble=true; + if (ev.stopPropagation) ev.stopPropagation(); + if (ev.preventDefault) ev.preventDefault(); + return false; + } + function keydown(ev) { + if (!ev) var ev=window.event; + if (ie) { +// s="kd keyCode="+ev.keyCode+" which="+ev.which+" shiftKey="+ev.shiftKey+" ctrlKey="+ev.ctrlKey+" altKey="+ev.altKey; +// debug(s); + o={9:1,8:1,27:1,33:1,34:1,35:1,36:1,37:1,38:1,39:1,40:1,45:1,46:1,112:1, + 113:1,114:1,115:1,116:1,117:1,118:1,119:1,120:1,121:1,122:1,123:1}; + if (o[ev.keyCode] || ev.ctrlKey || ev.altKey) { + ev.which=0; + return keypress(ev); + } + } + } + 
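`keypress()` above turns browser keycodes into the byte sequences a terminal expects. The two arithmetic rules at its core, restated in Python for clarity (the JavaScript remains the authoritative version):

```python
ESC = chr(27)


def encode_ctrl(keycode):
    # Ctrl-A..Ctrl-Z become control bytes 1..26: subtract the base of
    # the uppercase (64) or lowercase (96) ASCII letter range.
    if 65 <= keycode <= 90:
        return chr(keycode - 64)
    if 97 <= keycode <= 122:
        return chr(keycode - 96)
    return None


def encode_alt(keycode):
    # Alt-x is sent as ESC followed by the lowercased letter.
    if 65 <= keycode <= 90:
        keycode += 32
    if 97 <= keycode <= 122:
        return ESC + chr(keycode)
    return None
```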
function init() { + sled.appendChild(document.createTextNode('\xb7')); + sled.className='off'; + dstat.appendChild(sled); + dstat.appendChild(document.createTextNode(' ')); + opt_add(opt_color,'Colors'); + opt_color.className='on'; + opt_add(opt_get,'GET'); + opt_add(opt_paste,'Paste'); + dstat.appendChild(sdebug); + dstat.className='stat'; + div.appendChild(dstat); + div.appendChild(dterm); + if(opt_color.addEventListener) { + opt_get.addEventListener('click',do_get,true); + opt_color.addEventListener('click',do_color,true); + opt_paste.addEventListener('click',do_paste,true); + } else { + opt_get.attachEvent("onclick", do_get); + opt_color.attachEvent("onclick", do_color); + opt_paste.attachEvent("onclick", do_paste); + } + document.onkeypress=keypress; + document.onkeydown=keydown; + timeout=window.setTimeout(update,100); + } + init(); +} +ajaxterm.Terminal=function(id,width,height) { + return new this.Terminal_ctor(id,width,height); +} + diff --git a/tools/ajaxterm/ajaxterm.py b/tools/ajaxterm/ajaxterm.py new file mode 100755 index 000000000000..bf27b264a465 --- /dev/null +++ b/tools/ajaxterm/ajaxterm.py @@ -0,0 +1,586 @@ +#!/usr/bin/env python + +""" Ajaxterm """ + +import array,cgi,fcntl,glob,mimetypes,optparse,os,pty,random,re,signal,select,sys,threading,time,termios,struct,pwd + +os.chdir(os.path.normpath(os.path.dirname(__file__))) +# Optional: Add QWeb in sys path +sys.path[0:0]=glob.glob('../../python') + +import qweb +import string, subprocess, uuid + +global g_server +TIMEOUT=300 + +class Terminal: + def __init__(self,width=80,height=24): + self.width=width + self.height=height + self.init() + self.reset() + def init(self): + self.esc_seq={ + "\x00": None, + "\x05": self.esc_da, + "\x07": None, + "\x08": self.esc_0x08, + "\x09": self.esc_0x09, + "\x0a": self.esc_0x0a, + "\x0b": self.esc_0x0a, + "\x0c": self.esc_0x0a, + "\x0d": self.esc_0x0d, + "\x0e": None, + "\x0f": None, + "\x1b#8": None, + "\x1b=": None, + "\x1b>": None, + "\x1b(0": None, + "\x1b(A": None, + "\x1b(B": None, + "\x1b[c": self.esc_da, + "\x1b[0c": self.esc_da, + "\x1b]R": None, + "\x1b7": self.esc_save, + "\x1b8": self.esc_restore, + "\x1bD": None, + "\x1bE": None, + "\x1bH": None, + "\x1bM": self.esc_ri, + "\x1bN": None, + "\x1bO": None, + "\x1bZ": self.esc_da, + "\x1ba": None, + "\x1bc": self.reset, + "\x1bn": None, + "\x1bo": None, + } + for k,v in self.esc_seq.items(): + if v==None: + self.esc_seq[k]=self.esc_ignore + # regex + d={ + r'\[\??([0-9;]*)([@ABCDEFGHJKLMPXacdefghlmnqrstu`])' : self.csi_dispatch, + r'\]([^\x07]+)\x07' : self.esc_ignore, + } + self.esc_re=[] + for k,v in d.items(): + self.esc_re.append((re.compile('\x1b'+k),v)) + # define csi sequences + self.csi_seq={ + '@': (self.csi_at,[1]), + '`': (self.csi_G,[1]), + 'J': (self.csi_J,[0]), + 'K': (self.csi_K,[0]), + } + for i in [i[4] for i in dir(self) if i.startswith('csi_') and len(i)==5]: + if not self.csi_seq.has_key(i): + self.csi_seq[i]=(getattr(self,'csi_'+i),[1]) + # Init 0-256 to latin1 and html translation table + self.trl1="" + for i in range(256): + if i<32: + self.trl1+=" " + elif i<127 or i>160: + self.trl1+=chr(i) + else: + self.trl1+="?" + self.trhtml="" + for i in range(256): + if i==0x0a or (i>32 and i<127) or i>160: + self.trhtml+=chr(i) + elif i<=32: + self.trhtml+="\xa0" + else: + self.trhtml+="?" 
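`Terminal` keeps the whole screen in a flat `array.array('i')`, one integer per cell: the low byte is the character code, the next byte the foreground colour (with 0x08 as the bold bit), and the byte above that the background. That is why `reset()` fills the array with `0x000700` (a NUL drawn as fg 7 on bg 0) and why `dumphtml` decodes with paired `divmod` calls. The encoding in miniature:

```python
def pack_cell(char, fg=7, bg=0):
    # background in bits 16-23, foreground in bits 8-15, char in 0-7
    return (bg << 16) | (fg << 8) | ord(char)


def unpack_cell(cell):
    attrs, char = divmod(cell, 256)
    bg, fg = divmod(attrs, 256)
    return chr(char), fg, bg

# 0x000700, the blank cell used by reset(), is NUL in white-on-black.
assert unpack_cell(0x000700) == ('\x00', 7, 0)
```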
+ def reset(self,s=""): + self.scr=array.array('i',[0x000700]*(self.width*self.height)) + self.st=0 + self.sb=self.height-1 + self.cx_bak=self.cx=0 + self.cy_bak=self.cy=0 + self.cl=0 + self.sgr=0x000700 + self.buf="" + self.outbuf="" + self.last_html="" + def peek(self,y1,x1,y2,x2): + return self.scr[self.width*y1+x1:self.width*y2+x2] + def poke(self,y,x,s): + pos=self.width*y+x + self.scr[pos:pos+len(s)]=s + def zero(self,y1,x1,y2,x2): + w=self.width*(y2-y1)+x2-x1+1 + z=array.array('i',[0x000700]*w) + self.scr[self.width*y1+x1:self.width*y2+x2+1]=z + def scroll_up(self,y1,y2): + self.poke(y1,0,self.peek(y1+1,0,y2,self.width)) + self.zero(y2,0,y2,self.width-1) + def scroll_down(self,y1,y2): + self.poke(y1+1,0,self.peek(y1,0,y2-1,self.width)) + self.zero(y1,0,y1,self.width-1) + def scroll_right(self,y,x): + self.poke(y,x+1,self.peek(y,x,y,self.width)) + self.zero(y,x,y,x) + def cursor_down(self): + if self.cy>=self.st and self.cy<=self.sb: + self.cl=0 + q,r=divmod(self.cy+1,self.sb+1) + if q: + self.scroll_up(self.st,self.sb) + self.cy=self.sb + else: + self.cy=r + def cursor_right(self): + q,r=divmod(self.cx+1,self.width) + if q: + self.cl=1 + else: + self.cx=r + def echo(self,c): + if self.cl: + self.cursor_down() + self.cx=0 + self.scr[(self.cy*self.width)+self.cx]=self.sgr|ord(c) + self.cursor_right() + def esc_0x08(self,s): + self.cx=max(0,self.cx-1) + def esc_0x09(self,s): + x=self.cx+8 + q,r=divmod(x,8) + self.cx=(q*8)%self.width + def esc_0x0a(self,s): + self.cursor_down() + def esc_0x0d(self,s): + self.cl=0 + self.cx=0 + def esc_save(self,s): + self.cx_bak=self.cx + self.cy_bak=self.cy + def esc_restore(self,s): + self.cx=self.cx_bak + self.cy=self.cy_bak + self.cl=0 + def esc_da(self,s): + self.outbuf="\x1b[?6c" + def esc_ri(self,s): + self.cy=max(self.st,self.cy-1) + if self.cy==self.st: + self.scroll_down(self.st,self.sb) + def esc_ignore(self,*s): + pass +# print "term:ignore: %s"%repr(s) + def csi_dispatch(self,seq,mo): + # CSI sequences + s=mo.group(1) + c=mo.group(2) + f=self.csi_seq.get(c,None) + if f: + try: + l=[min(int(i),1024) for i in s.split(';') if len(i)<4] + except ValueError: + l=[] + if len(l)==0: + l=f[1] + f[0](l) +# else: +# print 'csi ignore',c,l + def csi_at(self,l): + for i in range(l[0]): + self.scroll_right(self.cy,self.cx) + def csi_A(self,l): + self.cy=max(self.st,self.cy-l[0]) + def csi_B(self,l): + self.cy=min(self.sb,self.cy+l[0]) + def csi_C(self,l): + self.cx=min(self.width-1,self.cx+l[0]) + self.cl=0 + def csi_D(self,l): + self.cx=max(0,self.cx-l[0]) + self.cl=0 + def csi_E(self,l): + self.csi_B(l) + self.cx=0 + self.cl=0 + def csi_F(self,l): + self.csi_A(l) + self.cx=0 + self.cl=0 + def csi_G(self,l): + self.cx=min(self.width,l[0])-1 + def csi_H(self,l): + if len(l)<2: l=[1,1] + self.cx=min(self.width,l[1])-1 + self.cy=min(self.height,l[0])-1 + self.cl=0 + def csi_J(self,l): + if l[0]==0: + self.zero(self.cy,self.cx,self.height-1,self.width-1) + elif l[0]==1: + self.zero(0,0,self.cy,self.cx) + elif l[0]==2: + self.zero(0,0,self.height-1,self.width-1) + def csi_K(self,l): + if l[0]==0: + self.zero(self.cy,self.cx,self.cy,self.width-1) + elif l[0]==1: + self.zero(self.cy,0,self.cy,self.cx) + elif l[0]==2: + self.zero(self.cy,0,self.cy,self.width-1) + def csi_L(self,l): + for i in range(l[0]): + if self.cy=self.st and self.cy<=self.sb: + for i in range(l[0]): + self.scroll_up(self.cy,self.sb) + def csi_P(self,l): + w,cx,cy=self.width,self.cx,self.cy + end=self.peek(cy,cx,cy,w) + self.csi_K([0]) + self.poke(cy,cx,end[l[0]:]) + def csi_X(self,l): + 
self.zero(self.cy,self.cx,self.cy,self.cx+l[0]) + def csi_a(self,l): + self.csi_C(l) + def csi_c(self,l): + #'\x1b[?0c' 0-8 cursor size + pass + def csi_d(self,l): + self.cy=min(self.height,l[0])-1 + def csi_e(self,l): + self.csi_B(l) + def csi_f(self,l): + self.csi_H(l) + def csi_h(self,l): + if l[0]==4: + pass +# print "insert on" + def csi_l(self,l): + if l[0]==4: + pass +# print "insert off" + def csi_m(self,l): + for i in l: + if i==0 or i==39 or i==49 or i==27: + self.sgr=0x000700 + elif i==1: + self.sgr=(self.sgr|0x000800) + elif i==7: + self.sgr=0x070000 + elif i>=30 and i<=37: + c=i-30 + self.sgr=(self.sgr&0xff08ff)|(c<<8) + elif i>=40 and i<=47: + c=i-40 + self.sgr=(self.sgr&0x00ffff)|(c<<16) +# else: +# print "CSI sgr ignore",l,i +# print 'sgr: %r %x'%(l,self.sgr) + def csi_r(self,l): + if len(l)<2: l=[0,self.height] + self.st=min(self.height-1,l[0]-1) + self.sb=min(self.height-1,l[1]-1) + self.sb=max(self.st,self.sb) + def csi_s(self,l): + self.esc_save(0) + def csi_u(self,l): + self.esc_restore(0) + def escape(self): + e=self.buf + if len(e)>32: +# print "error %r"%e + self.buf="" + elif e in self.esc_seq: + self.esc_seq[e](e) + self.buf="" + else: + for r,f in self.esc_re: + mo=r.match(e) + if mo: + f(e,mo) + self.buf="" + break +# if self.buf=='': print "ESC %r\n"%e + def write(self,s): + for i in s: + if len(self.buf) or (i in self.esc_seq): + self.buf+=i + self.escape() + elif i == '\x1b': + self.buf+=i + else: + self.echo(i) + def read(self): + b=self.outbuf + self.outbuf="" + return b + def dump(self): + r='' + for i in self.scr: + r+=chr(i&255) + return r + def dumplatin1(self): + return self.dump().translate(self.trl1) + def dumphtml(self,color=1): + h=self.height + w=self.width + r="" + span="" + span_bg,span_fg=-1,-1 + for i in range(h*w): + q,c=divmod(self.scr[i],256) + if color: + bg,fg=divmod(q,256) + else: + bg,fg=0,7 + if i==self.cy*w+self.cx: + bg,fg=1,7 + if (bg!=span_bg or fg!=span_fg or i==h*w-1): + if len(span): + r+='%s'%(span_fg,span_bg,cgi.escape(span.translate(self.trhtml))) + span="" + span_bg,span_fg=bg,fg + span+=chr(c) + if i%w==w-1: + span+='\n' + r='
<?xml version="1.0" encoding="ISO-8859-1"?><pre class="term">%s</pre>
'%r + if self.last_html==r: + return '' + else: + self.last_html=r +# print self + return r + def __repr__(self): + d=self.dumplatin1() + r="" + for i in range(self.height): + r+="|%s|\n"%d[self.width*i:self.width*(i+1)] + return r + +class SynchronizedMethod: + def __init__(self,lock,orig): + self.lock=lock + self.orig=orig + def __call__(self,*l): + self.lock.acquire() + r=self.orig(*l) + self.lock.release() + return r + +class Multiplex: + def __init__(self,cmd=None): + signal.signal(signal.SIGCHLD, signal.SIG_IGN) + self.cmd=cmd + self.proc={} + self.lock=threading.RLock() + self.thread=threading.Thread(target=self.loop) + self.alive=1 + self.lastActivity=time.time() + # synchronize methods + for name in ['create','fds','proc_read','proc_write','dump','die','run']: + orig=getattr(self,name) + setattr(self,name,SynchronizedMethod(self.lock,orig)) + self.thread.start() + def create(self,w=80,h=25): + pid,fd=pty.fork() + if pid==0: + try: + fdl=[int(i) for i in os.listdir('/proc/self/fd')] + except OSError: + fdl=range(256) + for i in [i for i in fdl if i>2]: + try: + os.close(i) + except OSError: + pass + if self.cmd: + cmd=['/bin/sh','-c',self.cmd] + elif os.getuid()==0: + cmd=['/bin/login'] + else: + sys.stdout.write("Login: ") + login=sys.stdin.readline().strip() + if re.match('^[0-9A-Za-z-_. ]+$',login): + cmd=['ssh'] + cmd+=['-oPreferredAuthentications=keyboard-interactive,password'] + cmd+=['-oNoHostAuthenticationForLocalhost=yes'] + cmd+=['-oLogLevel=FATAL'] + cmd+=['-F/dev/null','-l',login,'localhost'] + else: + os._exit(0) + env={} + env["COLUMNS"]=str(w) + env["LINES"]=str(h) + env["TERM"]="linux" + env["PATH"]=os.environ['PATH'] + os.execvpe(cmd[0],cmd,env) + else: + fcntl.fcntl(fd, fcntl.F_SETFL, os.O_NONBLOCK) + # python bug http://python.org/sf/1112949 on amd64 + fcntl.ioctl(fd, struct.unpack('i',struct.pack('I',termios.TIOCSWINSZ))[0], struct.pack("HHHH",h,w,0,0)) + self.proc[fd]={'pid':pid,'term':Terminal(w,h),'buf':'','time':time.time()} + return fd + def die(self): + self.alive=0 + def run(self): + return self.alive + def fds(self): + return self.proc.keys() + def proc_kill(self,fd): + if fd in self.proc: + self.proc[fd]['time']=0 + t=time.time() + for i in self.proc.keys(): + t0=self.proc[i]['time'] + if (t-t0)>TIMEOUT: + try: + os.close(i) + os.kill(self.proc[i]['pid'],signal.SIGTERM) + except (IOError,OSError): + pass + del self.proc[i] + def proc_read(self,fd): + try: + t=self.proc[fd]['term'] + t.write(os.read(fd,65536)) + reply=t.read() + if reply: + os.write(fd,reply) + self.proc[fd]['time']=time.time() + except (KeyError,IOError,OSError): + self.proc_kill(fd) + def proc_write(self,fd,s): + try: + os.write(fd,s) + except (IOError,OSError): + self.proc_kill(fd) + def dump(self,fd,color=1): + try: + return self.proc[fd]['term'].dumphtml(color) + except KeyError: + return False + def loop(self): + while self.run(): + fds=self.fds() + i,o,e=select.select(fds, [], [], 1.0) + if time.time() - self.lastActivity > TIMEOUT: + global g_server + g_server.shutdown() + for fd in i: + self.proc_read(fd) + if len(i): + time.sleep(0.002) + for i in self.proc.keys(): + try: + os.close(i) + os.kill(self.proc[i]['pid'],signal.SIGTERM) + except (IOError,OSError): + pass + +class AjaxTerm: + def __init__(self,cmd=None,index_file='ajaxterm.html',token=None): + self.files={} + self.token=token + for i in ['css','html','js']: + for j in glob.glob('*.%s'%i): + self.files[j]=file(j).read() + self.files['index']=file(index_file).read() + self.mime = mimetypes.types_map.copy() + 
self.mime['.html']= 'text/html; charset=UTF-8' + self.multi = Multiplex(cmd) + self.session = {} + def __call__(self, environ, start_response): + req = qweb.QWebRequest(environ, start_response,session=None) + if req.PATH_INFO.endswith('/u'): + s=req.REQUEST["s"] + k=req.REQUEST["k"] + c=req.REQUEST["c"] + w=req.REQUEST.int("w") + h=req.REQUEST.int("h") + if s in self.session: + term=self.session[s] + else: + raise Exception('Not Authorized') + # The original code below was insecure, because it allowed unauthorized sessions to be created + # if not (w>2 and w<256 and h>2 and h<100): + # w,h=80,25 + # term=self.session[s]=self.multi.create(w,h) + if k: + self.multi.proc_write(term,k) + time.sleep(0.002) + self.multi.lastActivity = time.time(); + dump=self.multi.dump(term,c) + req.response_headers['Content-Type']='text/xml' + if isinstance(dump,str): + req.write(dump) + req.response_gzencode=1 + else: + del self.session[s] + req.write('') +# print "sessions %r"%self.session + else: + n=os.path.basename(req.PATH_INFO) + if n in self.files: + req.response_headers['Content-Type'] = self.mime.get(os.path.splitext(n)[1].lower(), 'application/octet-stream') + req.write(self.files[n]) + elif req.REQUEST['token'] == self.token: + req.response_headers['Content-Type'] = 'text/html; charset=UTF-8' + session_id = str(uuid.uuid4()) + req.write(string.Template(self.files['index']).substitute(session_id=session_id)) + term=self.session[session_id]=self.multi.create(80,25) + else: + raise Exception("Not Authorized") + return req + +def main(): + parser = optparse.OptionParser() + parser.add_option("-p", "--port", dest="port", default="8022", help="Set the TCP port (default: 8022)") + parser.add_option("-c", "--command", dest="cmd", default=None,help="set the command (default: /bin/login or ssh 0.0.0.0)") + parser.add_option("-l", "--log", action="store_true", dest="log",default=0,help="log requests to stderr (default: quiet mode)") + parser.add_option("-d", "--daemon", action="store_true", dest="daemon", default=0, help="run as daemon in the background") + parser.add_option("-P", "--pidfile",dest="pidfile",default="/var/run/ajaxterm.pid",help="set the pidfile (default: /var/run/ajaxterm.pid)") + parser.add_option("-i", "--index", dest="index_file", default="ajaxterm.html",help="default index file (default: ajaxterm.html)") + parser.add_option("-u", "--uid", dest="uid", help="Set the daemon's user id") + parser.add_option("-t", "--token", dest="token", help="Set authorization token") + (o, a) = parser.parse_args() + if o.daemon: + pid=os.fork() + if pid == 0: + #os.setsid() ? 
+ os.setpgrp() + nullin = file('/dev/null', 'r') + nullout = file('/dev/null', 'w') + os.dup2(nullin.fileno(), sys.stdin.fileno()) + os.dup2(nullout.fileno(), sys.stdout.fileno()) + os.dup2(nullout.fileno(), sys.stderr.fileno()) + if os.getuid()==0 and o.uid: + try: + os.setuid(int(o.uid)) + except: + os.setuid(pwd.getpwnam(o.uid).pw_uid) + else: + try: + file(o.pidfile,'w+').write(str(pid)+'\n') + except: + pass + print 'AjaxTerm at http://0.0.0.0:%s/ pid: %d' % (o.port,pid) + sys.exit(0) + else: + print 'AjaxTerm at http://0.0.0.0:%s/' % o.port + at=AjaxTerm(o.cmd,o.index_file,o.token) +# f=lambda:os.system('firefox http://localhost:%s/&'%o.port) +# qweb.qweb_wsgi_autorun(at,ip='localhost',port=int(o.port),threaded=0,log=o.log,callback_ready=None) + try: + global g_server + g_server = qweb.QWebWSGIServer(at,ip='0.0.0.0',port=int(o.port),threaded=0,log=o.log) + g_server.serve_forever() + except KeyboardInterrupt,e: + sys.excepthook(*sys.exc_info()) + at.multi.die() + +if __name__ == '__main__': + main() + diff --git a/tools/ajaxterm/configure b/tools/ajaxterm/configure new file mode 100755 index 000000000000..45391f484759 --- /dev/null +++ b/tools/ajaxterm/configure @@ -0,0 +1,32 @@ +#!/usr/bin/env python + +import optparse,os + +parser = optparse.OptionParser() +parser.add_option("", "--prefix", dest="prefix",default="/usr/local",help="installation prefix (default: /usr/local)") +parser.add_option("", "--confdir", dest="confdir", default="/etc",help="configuration files directory prefix (default: /etc)") +parser.add_option("", "--port", dest="port", default="8022", help="set the listening TCP port (default: 8022)") +parser.add_option("", "--command", dest="cmd", default=None,help="set the command (default: /bin/login or ssh localhost)") +(o, a) = parser.parse_args() + +print "Configuring prefix=",o.prefix," port=",o.port + +etc=o.confdir +port=o.port +cmd=o.cmd +bin=os.path.join(o.prefix,"bin") +lib=os.path.join(o.prefix,"share/ajaxterm") +man=os.path.join(o.prefix,"share/man/man1") + +file("ajaxterm.bin","w").write(file("configure.ajaxterm.bin").read()%locals()) +file("Makefile","w").write(file("configure.makefile").read()%locals()) + +if os.path.isfile("/etc/gentoo-release"): + file("ajaxterm.initd","w").write(file("configure.initd.gentoo").read()%locals()) +elif os.path.isfile("/etc/fedora-release") or os.path.isfile("/etc/redhat-release"): + file("ajaxterm.initd","w").write(file("configure.initd.redhat").read()%locals()) +else: + file("ajaxterm.initd","w").write(file("configure.initd.debian").read()%locals()) + +os.system("chmod a+x ajaxterm.bin") +os.system("chmod a+x ajaxterm.initd") diff --git a/tools/ajaxterm/configure.ajaxterm.bin b/tools/ajaxterm/configure.ajaxterm.bin new file mode 100644 index 000000000000..4d1f5a98f080 --- /dev/null +++ b/tools/ajaxterm/configure.ajaxterm.bin @@ -0,0 +1,2 @@ +#!/bin/sh +PYTHONPATH=%(lib)s exec %(lib)s/ajaxterm.py $@ diff --git a/tools/ajaxterm/configure.initd.debian b/tools/ajaxterm/configure.initd.debian new file mode 100644 index 000000000000..901082707249 --- /dev/null +++ b/tools/ajaxterm/configure.initd.debian @@ -0,0 +1,33 @@ +#!/bin/sh + +PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin +DAEMON=%(bin)s/ajaxterm +PORT=%(port)s +PIDFILE=/var/run/ajaxterm.pid + +[ -x "$DAEMON" ] || exit 0 + +#. 
diff --git a/tools/ajaxterm/configure.initd.debian b/tools/ajaxterm/configure.initd.debian
new file mode 100644
index 000000000000..901082707249
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.debian
@@ -0,0 +1,33 @@
+#!/bin/sh
+
+PATH=/usr/local/bin:/usr/local/sbin:/usr/bin:/usr/sbin:/bin:/sbin
+DAEMON=%(bin)s/ajaxterm
+PORT=%(port)s
+PIDFILE=/var/run/ajaxterm.pid
+
+[ -x "$DAEMON" ] || exit 0
+
+#. /lib/lsb/init-functions
+
+case "$1" in
+  start)
+	echo "Starting ajaxterm on port $PORT"
+	# "return" is only valid inside a function; use exit at top level
+	start-stop-daemon --start --pidfile $PIDFILE --exec $DAEMON -- --daemon --port=$PORT --uid=nobody || exit 2
+	;;
+  stop)
+	echo "Stopping ajaxterm"
+	start-stop-daemon --stop --pidfile $PIDFILE
+	rm -f $PIDFILE
+	;;
+  restart|force-reload)
+	$0 stop
+	sleep 1
+	$0 start
+	;;
+  *)
+	echo "Usage: $0 {start|stop|restart|force-reload}" >&2
+	exit 3
+	;;
+esac
+
+:
diff --git a/tools/ajaxterm/configure.initd.gentoo b/tools/ajaxterm/configure.initd.gentoo
new file mode 100644
index 000000000000..ac28ef0b66cc
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.gentoo
@@ -0,0 +1,27 @@
+#!/sbin/runscript
+
+# AjaxTerm Gentoo script, 08 May 2006 Mark Gillespie
+
+DAEMON=%(bin)s/ajaxterm
+PORT=%(port)s
+PIDFILE=/var/run/ajaxterm.pid
+
+depend()
+{
+	need net
+}
+
+start()
+{
+	ebegin "Starting AjaxTerm on port $PORT"
+	start-stop-daemon --start --pidfile $PIDFILE --exec $DAEMON -- --daemon --port=$PORT --uid=nobody
+	eend $?
+}
+
+stop()
+{
+	ebegin "Stopping AjaxTerm"
+	start-stop-daemon --stop --pidfile $PIDFILE
+	rm -f $PIDFILE
+	eend $?
+}
diff --git a/tools/ajaxterm/configure.initd.redhat b/tools/ajaxterm/configure.initd.redhat
new file mode 100644
index 000000000000..5c9788574fc7
--- /dev/null
+++ b/tools/ajaxterm/configure.initd.redhat
@@ -0,0 +1,75 @@
+#
+# ajaxterm	Startup script for ajaxterm
+#
+# chkconfig: - 99 99
+# description: Ajaxterm is a web-based terminal emulator
+# processname: ajaxterm
+# pidfile: /var/run/ajaxterm.pid
+# version: 1.0 Kevin Reichhart - ajaxterminit at lastname dot org
+
+# Source function library.
+. /etc/rc.d/init.d/functions
+
+if [ -f /etc/sysconfig/ajaxterm ]; then
+        . /etc/sysconfig/ajaxterm
+fi
+
+ajaxterm=/usr/local/bin/ajaxterm
+prog=ajaxterm
+pidfile=${PIDFILE-/var/run/ajaxterm.pid}
+lockfile=${LOCKFILE-/var/lock/subsys/ajaxterm}
+port=${PORT-8022}
+user=${xUSER-nobody}
+RETVAL=0
+
+
+start() {
+        echo -n $"Starting $prog: "
+        daemon $ajaxterm --daemon --port=$port --uid=$user $OPTIONS
+        RETVAL=$?
+        echo
+        [ $RETVAL = 0 ] && touch ${lockfile}
+        return $RETVAL
+}
+stop() {
+        echo -n $"Stopping $prog: "
+        killproc $ajaxterm
+        RETVAL=$?
+        echo
+        [ $RETVAL = 0 ] && rm -f ${lockfile} ${pidfile}
+}
+reload() {
+        echo -n $"Reloading $prog: "
+        killproc $ajaxterm -HUP
+        RETVAL=$?
+        echo
+}
+
+# See how we were called.
+case "$1" in
+  start)
+	start
+	;;
+  stop)
+	stop
+	;;
+  status)
+	status python ajaxterm
+	RETVAL=$?
+	;;
+  restart)
+	stop
+	start
+	;;
+  condrestart)
+	if [ -f ${pidfile} ] ; then
+		stop
+		start
+	fi
+	;;
+  *)
+	echo $"Usage: $prog {start|stop|status|restart|condrestart}"
+	exit 1
+esac
+
+exit $RETVAL
diff --git a/tools/ajaxterm/configure.makefile b/tools/ajaxterm/configure.makefile
new file mode 100644
index 000000000000..6bd80853dd6a
--- /dev/null
+++ b/tools/ajaxterm/configure.makefile
@@ -0,0 +1,20 @@
+build:
+	true
+
+install:
+	install -d "%(bin)s"
+	install -d "%(lib)s"
+	install ajaxterm.bin "%(bin)s/ajaxterm"
+	install ajaxterm.initd "%(etc)s/init.d/ajaxterm"
+	install -m 644 ajaxterm.css ajaxterm.html ajaxterm.js qweb.py sarissa.js sarissa_dhtml.js "%(lib)s"
+	install -m 755 ajaxterm.py "%(lib)s"
+	gzip --best -c ajaxterm.1 > ajaxterm.1.gz
+	install -d "%(man)s"
+	install ajaxterm.1.gz "%(man)s"
+
+clean:
+	rm -f ajaxterm.bin
+	rm -f ajaxterm.initd
+	rm -f ajaxterm.1.gz
+	rm -f Makefile
+
diff --git a/tools/ajaxterm/qweb.py b/tools/ajaxterm/qweb.py
new file mode 100644
index 000000000000..20c5092300ab
--- /dev/null
+++ b/tools/ajaxterm/qweb.py
@@ -0,0 +1,1356 @@
+#!/usr/bin/python2.3
+#
+# vim:set et ts=4 fdc=0 fdn=2 fdl=0:
+#
+# There are no blank lines between blocks because I use folding from:
+# http://www.vim.org/scripts/script.php?script_id=515
+#
+
+"""= QWeb Framework =
+
+== What is QWeb ? ==
+
+QWeb is a Python-based [http://www.python.org/doc/peps/pep-0333/ WSGI]
+compatible web framework; it provides an infrastructure to quickly build web
+applications consisting of:
+
+ * A lightweight request handler (QWebRequest)
+ * An XML templating engine (QWebXml and QWebHtml)
+ * A simple name-based controller (qweb_control)
+ * A standalone WSGI server (QWebWSGIServer)
+ * A CGI and FastCGI WSGI wrapper (taken from flup)
+ * A startup function that starts CGI, FastCGI or standalone mode according
+   to the environment (qweb_autorun).
+
+QWeb applications are runnable in standalone mode (from the command line),
+via FastCGI, regular CGI, or by any Python WSGI-compliant server.
+
+QWeb doesn't provide any database access, but it integrates nicely with ORMs
+such as SQLObject, SQLAlchemy or plain DB-API.
+
+Written by Antony Lesuisse (email al AT udev.org)
+
+Homepage: http://antony.lesuisse.org/qweb/trac/
+
+Forum: [http://antony.lesuisse.org/qweb/forum/viewforum.php?id=1 Forum]
+
+== Quick Start (for Linux, MacOS X and cygwin) ==
+
+Make sure you have at least Python 2.3 installed and run the following commands:
+
+{{{
+$ wget http://antony.lesuisse.org/qweb/files/QWeb-0.7.tar.gz
+$ tar zxvf QWeb-0.7.tar.gz
+$ cd QWeb-0.7/examples/blog
+$ ./blog.py
+}}}
+
+And point your browser to http://localhost:8080/
+
+You may also try AjaxTerm, which uses the qweb request handler.
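+
+As an illustration, here is a minimal application served standalone (an
+editor's sketch, not part of the original documentation; the QWebRequest and
+QWebWSGIServer calls mirror their use in ajaxterm.py above):
+
+{{{
+def hello(environ, start_response):
+    req = QWebRequest(environ, start_response, session=None)
+    req.response_headers['Content-Type'] = 'text/plain'
+    req.write("hello world")
+    return req
+
+QWebWSGIServer(hello, ip='127.0.0.1', port=8080, threaded=0, log=1).serve_forever()
+}}}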
+
+== Download ==
+
+ * Version 0.7:
+   * Source [/qweb/files/QWeb-0.7.tar.gz QWeb-0.7.tar.gz]
+   * Python 2.3 Egg [/qweb/files/QWeb-0.7-py2.3.egg QWeb-0.7-py2.3.egg]
+   * Python 2.4 Egg [/qweb/files/QWeb-0.7-py2.4.egg QWeb-0.7-py2.4.egg]
+
+ * [/qweb/trac/browser Browse the source repository]
+
+== Documentation ==
+
+ * [/qweb/trac/browser/trunk/README.txt?format=raw Read the included documentation]
+ * QwebTemplating
+
+== Mailing-list ==
+
+ * Forum: [http://antony.lesuisse.org/qweb/forum/viewforum.php?id=1 Forum]
+ * No mailing-list exists yet; discussion should happen on:
+   [http://mail.python.org/mailman/listinfo/web-sig web-sig]
+   [http://mail.python.org/pipermail/web-sig/ archives]
+
+QWeb Components:
+----------------
+
+QWeb also features a simple components API that enables developers to easily
+produce reusable components.
+
+Default qweb components:
+
+ - qweb_static:
+   A qweb component to serve static content from the filesystem or from
+   zipfiles.
+
+ - qweb_dbadmin:
+   scaffolding for sqlobject
+
+License
+-------
+qweb/fcgi.py, which is BSD-like, is from saddi.com.
+Everything else is put in the public domain.
+
+
+TODO
+----
+  Announce QWeb to python-announce-list@python.org web-sig@python.org
+  qweb_core
+    rename request methods into
+      request_save_files
+      response_404
+      response_redirect
+      response_download
+    request callback_generator, callback_function ?
+    wsgi callback_server_local
+    xml tags explicitly call render_attributes(t_att)?
+    priority form-checkbox over t-value (for t-option)
+
+"""
+
+import BaseHTTPServer,SocketServer,Cookie
+import cgi,datetime,email,email.Message,errno,gzip,os,random,re,socket,sys,tempfile,time,types,urllib,urlparse,xml.dom
+try:
+    import cPickle as pickle
+except ImportError:
+    import pickle
+try:
+    import cStringIO as StringIO
+except ImportError:
+    import StringIO
+
+#----------------------------------------------------------
+# Qweb Xml t-raw t-esc t-if t-foreach t-set t-call t-trim
+#----------------------------------------------------------
+class QWebEval:
+    def __init__(self,data):
+        self.data=data
+    def __getitem__(self,expr):
+        if self.data.has_key(expr):
+            return self.data[expr]
+        r=None
+        try:
+            r=eval(expr,self.data)
+        except NameError:
+            pass
+        except AttributeError:
+            pass
+        except Exception,e:
+            print "qweb: expression error '%s' "%expr,e
+        if self.data.has_key("__builtins__"):
+            del self.data["__builtins__"]
+        return r
+    def eval_object(self,expr):
+        return self[expr]
+    def eval_str(self,expr):
+        if expr=="0":
+            return self.data[0]
+        if isinstance(self[expr],unicode):
+            return self[expr].encode("utf8")
+        return str(self[expr])
+    def eval_format(self,expr):
+        try:
+            return str(expr%self)
+        except:
+            return "qweb: format error '%s' "%expr
+    def eval_bool(self,expr):
+        if self.eval_object(expr):
+            return 1
+        else:
+            return 0
+class QWebXml:
+    """QWeb XML templating engine
+
+    The templating engine uses a very simple syntax, "magic" XML attributes,
+    to produce any kind of textual output (even non-XML).
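+
+    A minimal illustration (an editor's sketch; registration of <t t-name=...>
+    templates and the render() signature follow the code below):
+
+    {{{
+    x = '<?xml version="1.0"?><t><t t-name="hi">Hello <t t-esc="who"/>!</t></t>'
+    print QWebXml(x).render("hi", {"who": "<world>"})
+    # prints: Hello &lt;world&gt;!
+    }}}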
+
+    QWebXml:
+        the template engine core implements the basic magic attributes:
+
+        t-att t-raw t-esc t-if t-foreach t-set t-call t-trim
+
+    """
+    def __init__(self,x=None,zipname=None):
+        self.node=xml.dom.Node
+        self._t={}
+        self._render_tag={}
+        prefix='render_tag_'
+        for i in [j for j in dir(self) if j.startswith(prefix)]:
+            name=i[len(prefix):].replace('_','-')
+            self._render_tag[name]=getattr(self.__class__,i)
+
+        self._render_att={}
+        prefix='render_att_'
+        for i in [j for j in dir(self) if j.startswith(prefix)]:
+            name=i[len(prefix):].replace('_','-')
+            self._render_att[name]=getattr(self.__class__,i)
+
+        if x!=None:
+            if zipname!=None:
+                import zipfile
+                zf=zipfile.ZipFile(zipname, 'r')
+                self.add_template(zf.read(x))
+            else:
+                self.add_template(x)
+    def register_tag(self,tag,func):
+        self._render_tag[tag]=func
+    def add_template(self,x):
+        # Templates are <t t-name="..."> nodes collected from the document
+        # root; x may be a DOM, an XML string or a file name.
+        if hasattr(x,'documentElement'):
+            dom=x
+        elif x.startswith("<?xml"):
+            dom=xml.dom.minidom.parseString(x)
+        else:
+            dom=xml.dom.minidom.parse(x)
+        for n in dom.documentElement.childNodes:
+            if n.nodeName=="t":
+                self._t[str(n.getAttribute("t-name"))]=n
+    def get_template(self,name):
+        return self._t[name]
+
+    # Evaluation helpers, delegating to QWebEval
+    def eval_object(self,expr,v):
+        return QWebEval(v).eval_object(expr)
+    def eval_str(self,expr,v):
+        return QWebEval(v).eval_str(expr)
+    def eval_format(self,expr,v):
+        return QWebEval(v).eval_format(expr)
+    def eval_bool(self,expr,v):
+        return QWebEval(v).eval_bool(expr)
+
+    # Rendering
+    def render(self,tname,v):
+        if self._t.has_key(tname):
+            return self.render_node(self._t[tname],v)
+        return 'qweb: template "%s" not found'%tname
+    def render_node(self,e,v):
+        # Text nodes are emitted as-is; element nodes dispatch on their t-*
+        # attributes: t-att* handlers accumulate generated attributes in
+        # g_att, tag handlers take over rendering of the whole element.
+        r=""
+        if e.nodeType==self.node.TEXT_NODE or e.nodeType==self.node.CDATA_SECTION_NODE:
+            r=e.data.encode("utf8")
+        elif e.nodeType==self.node.ELEMENT_NODE:
+            g_att=""
+            t_render=None
+            t_att={}
+            for (an,av) in e.attributes.items():
+                an=str(an)
+                av=av.encode("utf8")
+                if an.startswith("t-"):
+                    for i in self._render_att:
+                        if an[2:].startswith(i):
+                            g_att+=self._render_att[i](self,e,an,av,v)
+                            break
+                    else:
+                        if self._render_tag.has_key(an[2:]):
+                            t_render=an[2:]
+                        t_att[an[2:]]=av
+                else:
+                    g_att+=' %s="%s"'%(an,cgi.escape(av,1))
+            if t_render:
+                r=self._render_tag[t_render](self,e,t_att,g_att,v)
+            else:
+                r=self.render_element(e,g_att,v,"",t_att.get("trim",0))
+        return r
+    def render_element(self,e,g_att,v,pre="",trim=0):
+        g_inner=[]
+        for n in e.childNodes:
+            g_inner.append(self.render_node(n,v))
+        name=str(e.nodeName)
+        inner="".join(g_inner)
+        if trim=='left':
+            inner=inner.lstrip()
+        elif trim=='right':
+            inner=inner.rstrip()
+        elif trim=='both':
+            inner=inner.strip()
+        if name=="t":
+            # bare <t> elements emit only their contents, no tag markup
+            return inner
+        elif len(inner):
+            return "<%s%s>%s%s</%s>"%(name,g_att,pre,inner,name)
+        else:
+            return "<%s%s/>"%(name,g_att)
+
+    # Attributes
+    def render_att_att(self,e,an,av,v):
+        if an.startswith("t-attf-"):
+            att,val=an[7:],self.eval_format(av,v)
+        elif an.startswith("t-att-"):
+            att,val=(an[6:],self.eval_str(av,v))
+        else:
+            att,val=self.eval_object(av,v)
+        return ' %s="%s"'%(att,cgi.escape(val,1))
+
+    # Tags
+    def render_tag_raw(self,e,t_att,g_att,v):
+        return self.eval_str(t_att["raw"],v)
+    def render_tag_rawf(self,e,t_att,g_att,v):
+        return self.eval_format(t_att["rawf"],v)
+    def render_tag_esc(self,e,t_att,g_att,v):
+        return cgi.escape(self.eval_str(t_att["esc"],v))
+    def render_tag_escf(self,e,t_att,g_att,v):
+        return cgi.escape(self.eval_format(t_att["escf"],v))
+    def render_tag_foreach(self,e,t_att,g_att,v):
+        expr=t_att["foreach"]
+        enum=self.eval_object(expr,v)
+        if enum!=None:
+            var=t_att.get('as',expr).replace('.','_')
+            d=v.copy()
+            size=-1
+            if isinstance(enum,types.ListType):
+                size=len(enum)
+            elif isinstance(enum,types.TupleType):
+                size=len(enum)
+            elif hasattr(enum,'count'):
+                size=enum.count()
+            d["%s_size"%var]=size
+            d["%s_all"%var]=enum
+            index=0
+            ru=[]
+            for i in enum:
+                d["%s_value"%var]=i
+                d["%s_index"%var]=index
+                d["%s_first"%var]=index==0
+                d["%s_even"%var]=index%2
+                d["%s_odd"%var]=(index+1)%2
+                d["%s_last"%var]=index+1==size
+                if index%2:
+                    d["%s_parity"%var]='odd'
+                else:
+                    d["%s_parity"%var]='even'
+                if isinstance(i,types.DictType):
+                    d.update(i)
+                else:
+                    d[var]=i
+                ru.append(self.render_element(e,g_att,d))
+                index+=1
+            return "".join(ru)
+        else:
+            return "qweb: t-foreach %s not found."%expr
+    def render_tag_if(self,e,t_att,g_att,v):
+        if self.eval_bool(t_att["if"],v):
+            return self.render_element(e,g_att,v)
+        else:
+            return ""
+    def render_tag_call(self,e,t_att,g_att,v):
+        # TODO t-prefix
+        if t_att.has_key("import"):
+            d=v
+        else:
+            d=v.copy()
+        d[0]=self.render_element(e,g_att,d)
+        return self.render(t_att["call"],d)
+    def render_tag_set(self,e,t_att,g_att,v):
+        if t_att.has_key("eval"):
+            v[t_att["set"]]=self.eval_object(t_att["eval"],v)
+        else:
+            v[t_att["set"]]=self.render_element(e,g_att,v)
+        return ""
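+
+# Editor's illustration of the loop variables t-foreach sets up above (a
+# sketch; "items" and the inline template are hypothetical):
+#
+#   <t t-foreach="items" t-as="i">[<t t-esc="i_index"/>:<t t-esc="i_value"/>]</t>
+#
+# rendered with v={"items": ["a", "b"]} produces: [0:a][1:b]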
+
+#----------------------------------------------------------
+# QWeb HTML (+deprecated QWebFORM and QWebOLD)
+#----------------------------------------------------------
+class QWebURL:
+    """ URL helper
+
+    assert req.PATH_INFO == "/site/admin/page_edit"
+    u = QWebURL(root_path="/site/", req_path=req.PATH_INFO)
+    s = u.href("user/login", {'a': '1'})
+    assert s == "../user/login?a=1"
+    """
+    def __init__(self, root_path="/", req_path="/",defpath="",defparam={}):
+        self.defpath=defpath
+        self.defparam=defparam
+        self.root_path=root_path
+        self.req_path=req_path
+        self.req_list=req_path.split("/")[:-1]
+        self.req_len=len(self.req_list)
+    def decode(self,s):
+        h={}
+        for k,v in cgi.parse_qsl(s,1):
+            h[k]=v
+        return h
+    def encode(self,h):
+        return urllib.urlencode(h.items())
+    def request(self,req):
+        return req.REQUEST
+    def copy(self,path=None,param=None):
+        npath=self.defpath
+        if path:
+            npath=path
+        nparam=self.defparam.copy()
+        if param:
+            nparam.update(param)
+        return QWebURL(self.root_path,self.req_path,npath,nparam)
+    def path(self,path=''):
+        if not path:
+            path=self.defpath
+        pl=(self.root_path+path).split('/')
+        i=0
+        for i in range(min(len(pl), self.req_len)):
+            if pl[i]!=self.req_list[i]:
+                break
+        else:
+            i+=1
+        dd=self.req_len-i
+        if dd<0:
+            dd=0
+        return '/'.join(['..']*dd+pl[i:])
+    def href(self,path='',arg={}):
+        p=self.path(path)
+        tmp=self.defparam.copy()
+        tmp.update(arg)
+        s=self.encode(tmp)
+        if len(s):
+            return p+"?"+s
+        else:
+            return p
+    def form(self,path='',arg={}):
+        p=self.path(path)
+        tmp=self.defparam.copy()
+        tmp.update(arg)
+        # default parameters are carried along as hidden form fields
+        r=''.join(['<input type="hidden" name="%s" value="%s"/>'%(k,cgi.escape(str(v),1)) for k,v in tmp.items()])
+        return (p,r)
+class QWebField:
+    def __init__(self,name=None,default="",check=None):
+        self.name=name
+        self.default=default
+        self.check=check
+        # optional attributes
+        self.type=None
+        self.trim=1
+        self.required=1
+        self.cssvalid="form_valid"
+        self.cssinvalid="form_invalid"
+        # set by addfield
+        self.form=None
+        # set by processing
+        self.input=None
+        self.css=None
+        self.value=None
+        self.valid=None
+        self.invalid=None
+        self.validate(1)
+    def validate(self,val=1,update=1):
+        if val:
+            self.valid=1
+            self.invalid=0
+            self.css=self.cssvalid
+        else:
+            self.valid=0
+            self.invalid=1
+            self.css=self.cssinvalid
+        if update and self.form:
+            self.form.update()
+    def invalidate(self,update=1):
+        self.validate(0,update)
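+
+# Editor's illustration of the form workflow implemented by QWebForm below
+# (a sketch; the field name and input dict are hypothetical):
+#
+#   form = QWebForm()
+#   form.add_field(QWebField("email", check="email"))
+#   form.process_input({"email": "user@example.com"})
+#   assert form.valid and form.f.email.value == "user@example.com"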
+class QWebForm:
+    class QWebFormF:
+        pass
+    def __init__(self,e=None,arg=None,default=None):
+        self.fields={}
+        # all fields have been submitted
+        self.submitted=False
+        self.missing=[]
+        # at least one field is invalid or missing
+        self.invalid=False
+        self.error=[]
+        # all fields have been submitted and are valid
+        self.valid=False
+        # fields under self.f for convenience
+        self.f=self.QWebFormF()
+        if e:
+            self.add_template(e)
+        # assume that the fields are done with the template
+        if default:
+            self.set_default(default,e==None)
+        if arg!=None:
+            self.process_input(arg)
+    def __getitem__(self,k):
+        return self.fields[k]
+    def set_default(self,default,add_missing=1):
+        for k,v in default.items():
+            if self.fields.has_key(k):
+                self.fields[k].default=str(v)
+            elif add_missing:
+                self.add_field(QWebField(k,v))
+    def add_field(self,f):
+        self.fields[f.name]=f
+        f.form=self
+        setattr(self.f,f.name,f)
+    def add_template(self,e):
+        att={}
+        for (an,av) in e.attributes.items():
+            an=str(an)
+            if an.startswith("t-"):
+                att[an[2:]]=av.encode("utf8")
+        for i in ["form-text", "form-password", "form-radio", "form-checkbox", "form-select", "form-textarea"]:
+            if att.has_key(i):
+                name=att[i].split(".")[-1]
+                default=att.get("default","")
+                check=att.get("check",None)
+                f=QWebField(name,default,check)
+                if i=="form-textarea":
+                    f.type="textarea"
+                    f.trim=0
+                if i=="form-checkbox":
+                    f.type="checkbox"
+                    f.required=0
+                self.add_field(f)
+        for n in e.childNodes:
+            if n.nodeType==n.ELEMENT_NODE:
+                self.add_template(n)
+    def process_input(self,arg):
+        for f in self.fields.values():
+            if arg.has_key(f.name):
+                f.input=arg[f.name]
+                f.value=f.input
+                if f.trim:
+                    f.input=f.input.strip()
+                f.validate(1,False)
+                if f.check==None:
+                    continue
+                elif callable(f.check):
+                    pass
+                elif isinstance(f.check,str):
+                    v=f.check
+                    if f.check=="email":
+                        v=r"/^[^@#!& ]+@[A-Za-z0-9-][.A-Za-z0-9-]{0,64}\.[A-Za-z]{2,5}$/"
+                    if f.check=="date":
+                        v=r"/^(19|20)\d\d-(0[1-9]|1[012])-(0[1-9]|[12][0-9]|3[01])$/"
+                    if not re.match(v[1:-1],f.input):
+                        f.validate(0,False)
+            else:
+                f.value=f.default
+        self.update()
+    def validate_all(self,val=1):
+        for f in self.fields.values():
+            f.validate(val,0)
+        self.update()
+    def invalidate_all(self):
+        self.validate_all(0)
+    def update(self):
+        self.submitted=True
+        self.valid=True
+        # reset and recompute the error/missing lists on every update
+        self.error=[]
+        self.missing=[]
+        for f in self.fields.values():
+            if f.required and f.input==None:
+                self.submitted=False
+                self.valid=False
+                self.missing.append(f.name)
+            if f.invalid:
+                self.valid=False
+                self.error.append(f.name)
+        # invalid: the form was submitted but at least one field failed
+        self.invalid=self.submitted and self.valid==False
+    def collect(self):
+        d={}
+        for f in self.fields.values():
+            d[f.name]=f.value
+        return d
+class QWebURLEval(QWebEval):
+    def __init__(self,data):
+        QWebEval.__init__(self,data)
+    def __getitem__(self,expr):
+        r=QWebEval.__getitem__(self,expr)
+        if isinstance(r,str):
+            return urllib.quote_plus(r)
+        else:
+            return r
+class QWebHtml(QWebXml):
+    """QWebHtml
+    QWebURL:
+    QWebField:
+    QWebForm:
+    QWebHtml:
+        an extended template engine, with a few utility classes to easily
+        produce HTML, handle URLs and process forms; it adds the following
+        magic attributes:
+
+        t-href t-action t-form-text t-form-password t-form-textarea t-form-radio
+        t-form-checkbox t-form-select t-option t-selected t-checked t-pager
+
+    # explanation, URL:
+    #   v['tableurl']=QWebURL({p=afdmin,saar=,orderby=,des=,mlink;meta_active=})
+    #   t-href="tableurl?desc=1"
+    #
+    # explanation, FORM: t-if="form.valid()"