Moved logger references to file headers

Moved the logger references in health_checker.py and
listener_manager.py to the file headers to match convention.
Calls to the logger were adjusted to reflect the new
variable names. Also copied the changes that removed the
use of the passed-in logger and use oslo_log's log.getLogger()
to handle logging.

Story: #2001031
Task: #4232

Change-Id: I66beb635a78e5f6f0caae0b5a63c55dfdb5d15fe
This commit is contained in:
Jake Carlson 2017-06-13 01:45:31 -05:00
parent 2109da9972
commit 77d4e1e47f
20 changed files with 475 additions and 457 deletions

View File

@ -13,31 +13,33 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Daemon foe Valet Engine."""
import os import os
import sys import sys
import traceback import traceback
from oslo_log import log
from valet.common.conf import get_logger from valet.common.conf import get_logger
from valet.engine.optimizer.ostro.ostro import Ostro from valet.engine.optimizer.ostro.ostro import Ostro
from valet.engine.optimizer.ostro_server.configuration import Config from valet.engine.optimizer.ostro_server.configuration import Config
from valet.engine.optimizer.ostro_server.daemon import Daemon from valet.engine.optimizer.ostro_server.daemon import Daemon
LOG = log.getLogger(__name__)
class OstroDaemon(Daemon): class OstroDaemon(Daemon):
"""Daemon foe Valet Engine.""" """Daemon for Valet Engine."""
def run(self): def run(self):
"""Run the daemon.""" """Run the daemon."""
self.logger.info("##### Valet Engine is launched #####") LOG.info("Valet Engine is launched")
try: try:
ostro = Ostro(config, self.logger) ostro = Ostro(config)
except Exception: except Exception:
self.logger.error(traceback.format_exc()) LOG.error(traceback.format_exc())
if ostro.bootstrap() is False: if ostro.bootstrap() is False:
self.logger.error("ostro bootstrap failed") LOG.error("Valet Engine bootstrap failed")
sys.exit(2) sys.exit(2)
# Write pidfile # Write pidfile

View File

@ -20,11 +20,14 @@ import json
import pika import pika
import threading import threading
import traceback import traceback
from valet.common.conf import get_logger
from oslo_log import log
from valet.common.music import Music from valet.common.music import Music
from valet.engine.listener.oslo_messages import OsloMessage from valet.engine.listener.oslo_messages import OsloMessage
import yaml import yaml
LOG = log.getLogger(__name__)
class ListenerManager(threading.Thread): class ListenerManager(threading.Thread):
"""Listener Manager Thread Class.""" """Listener Manager Thread Class."""
@ -35,7 +38,6 @@ class ListenerManager(threading.Thread):
self.thread_id = _t_id self.thread_id = _t_id
self.thread_name = _t_name self.thread_name = _t_name
self.config = _config self.config = _config
self.listener_logger = get_logger("ostro_listener")
self.MUSIC = None self.MUSIC = None
def run(self): def run(self):
@ -47,7 +49,7 @@ class ListenerManager(threading.Thread):
credentials = pika.PlainCredentials("guest", "PASSWORD"). credentials = pika.PlainCredentials("guest", "PASSWORD").
""" """
try: try:
self.listener_logger.info("ListenerManager: start " + LOG.info("ListenerManager: start " +
self.thread_name + " ......") self.thread_name + " ......")
if self.config.events_listener.store: if self.config.events_listener.store:
@ -58,19 +60,21 @@ class ListenerManager(threading.Thread):
'replication_factor': self.config.music.replication_factor, 'replication_factor': self.config.music.replication_factor,
'music_server_retries': 'music_server_retries':
self.config.music.music_server_retries, self.config.music.music_server_retries,
'logger': self.listener_logger, 'logger': LOG,
} }
engine = Music(**kwargs) engine = Music(**kwargs)
engine.create_keyspace(self.config.music.keyspace) engine.create_keyspace(self.config.music.keyspace)
self.MUSIC = {'engine': engine, self.MUSIC = {'engine': engine,
'keyspace': self.config.music.keyspace} 'keyspace': self.config.music.keyspace}
self.listener_logger.debug(
'Storing in music on %s, keyspace %s',
self.config.music.hosts, self.config.music.keyspace)
self.listener_logger.debug('Connecting to %s, with %s', LOG.debug(
self.config.messaging.host, 'Storing in music on %s, keyspace %s' %
self.config.messaging.username) (self.config.music.host, self.config.music.keyspace))
LOG.debug('Connecting to %s, with %s' %
(self.config.messaging.host,
self.config.messaging.username))
credentials = pika.PlainCredentials(self.config.messaging.username, credentials = pika.PlainCredentials(self.config.messaging.username,
self.config.messaging.password) self.config.messaging.password)
parameters = pika.ConnectionParameters(self.config.messaging.host, parameters = pika.ConnectionParameters(self.config.messaging.host,
@ -103,15 +107,16 @@ class ListenerManager(threading.Thread):
# Bind the queue to the selected exchange # Bind the queue to the selected exchange
channel.queue_bind(exchange=exchange_name, queue=queue_name, channel.queue_bind(exchange=exchange_name, queue=queue_name,
routing_key=binding_key) routing_key=binding_key)
self.listener_logger.info('Channel is bound,listening on %s '
'exchange %s', LOG.info('Channel is bound,listening on %s exchange %s',
self.config.messaging.host, self.config.messaging.host,
self.config.events_listener.exchange) self.config.events_listener.exchange)
# Start consuming messages # Start consuming messages
channel.basic_consume(self.on_message, queue_name) channel.basic_consume(self.on_message, queue_name)
except Exception: except Exception:
self.listener_logger.error(traceback.format_exc()) LOG.error("Failed to start ListenerManager thread: %s",
traceback.format_exc())
return return
try: try:
@ -136,13 +141,14 @@ class ListenerManager(threading.Thread):
else: else:
return return
self.listener_logger.debug( LOG.debug(
"\nMessage No: %s\n", method_frame.delivery_tag) "\nMessage No: %s\n", method_frame.delivery_tag)
self.listener_logger.debug( LOG.debug(
json.dumps(message, sort_keys=True, indent=2)) json.dumps(message, sort_keys=True, indent=2))
channel.basic_ack(delivery_tag=method_frame.delivery_tag) channel.basic_ack(delivery_tag=method_frame.delivery_tag)
except Exception: except Exception:
self.listener_logger.error(traceback.format_exc()) LOG.error("Could not specify action for message: %s",
traceback.format_exc())
return return
def is_message_wanted(self, message): def is_message_wanted(self, message):

View File

@ -18,10 +18,14 @@
import operator import operator
import time import time
from oslo_log import log
from valet.engine.optimizer.app_manager.app_topology import AppTopology from valet.engine.optimizer.app_manager.app_topology import AppTopology
from valet.engine.optimizer.app_manager.app_topology_base import VM from valet.engine.optimizer.app_manager.app_topology_base import VM
from valet.engine.optimizer.app_manager.application import App from valet.engine.optimizer.app_manager.application import App
LOG = log.getLogger(__name__)
class AppHistory(object): class AppHistory(object):
@ -40,12 +44,11 @@ class AppHandler(object):
placement and updating topology info. placement and updating topology info.
""" """
def __init__(self, _resource, _db, _config, _logger): def __init__(self, _resource, _db, _config):
"""Init App Handler Class.""" """Init App Handler Class."""
self.resource = _resource self.resource = _resource
self.db = _db self.db = _db
self.config = _config self.config = _config
self.logger = _logger
""" current app requested, a temporary copy """ """ current app requested, a temporary copy """
self.apps = {} self.apps = {}
@ -109,7 +112,7 @@ class AppHandler(object):
"""Add app and set or regenerate topology, return updated topology.""" """Add app and set or regenerate topology, return updated topology."""
self.apps.clear() self.apps.clear()
app_topology = AppTopology(self.resource, self.logger) app_topology = AppTopology(self.resource)
stack_id = None stack_id = None
if "stack_id" in _app.keys(): if "stack_id" in _app.keys():
@ -124,9 +127,7 @@ class AppHandler(object):
application_name = "none" application_name = "none"
action = _app["action"] action = _app["action"]
if action == "ping": if action == "replan" or action == "migrate":
self.logger.info("got ping")
elif action == "replan" or action == "migrate":
re_app = self._regenerate_app_topology(stack_id, _app, re_app = self._regenerate_app_topology(stack_id, _app,
app_topology, action) app_topology, action)
if re_app is None: if re_app is None:
@ -136,14 +137,15 @@ class AppHandler(object):
return None return None
if action == "replan": if action == "replan":
self.logger.info("got replan: " + stack_id) LOG.info("got replan: " + stack_id)
elif action == "migrate": elif action == "migrate":
self.logger.info("got migration: " + stack_id) LOG.info("got migration: " + stack_id)
app_id = app_topology.set_app_topology(re_app) app_id = app_topology.set_app_topology(re_app)
if app_id is None: if app_id is None:
self.logger.error(app_topology.status) LOG.error("Could not set app topology for regererated graph." +
app_topology.status)
self.status = app_topology.status self.status = app_topology.status
self.apps[stack_id] = None self.apps[stack_id] = None
return None return None
@ -151,12 +153,13 @@ class AppHandler(object):
app_id = app_topology.set_app_topology(_app) app_id = app_topology.set_app_topology(_app)
if len(app_topology.candidate_list_map) > 0: if len(app_topology.candidate_list_map) > 0:
self.logger.info("got ad-hoc placement: " + stack_id) LOG.info("got ad-hoc placement: " + stack_id)
else: else:
self.logger.info("got placement: " + stack_id) LOG.info("got placement: " + stack_id)
if app_id is None: if app_id is None:
self.logger.error(app_topology.status) LOG.error("Could not set app topology for app graph" +
app_topology.status)
self.status = app_topology.status self.status = app_topology.status
self.apps[stack_id] = None self.apps[stack_id] = None
return None return None
@ -216,7 +219,7 @@ class AppHandler(object):
if self.db is not None: if self.db is not None:
for appk, _ in self.apps.iteritems(): for appk, _ in self.apps.iteritems():
if self.db.add_app(appk, None) is False: if self.db.add_app(appk, None) is False:
self.logger.error("AppHandler: error while adding app " LOG.error("AppHandler: error while adding app "
"info to MUSIC") "info to MUSIC")
def get_vm_info(self, _s_uuid, _h_uuid, _host): def get_vm_info(self, _s_uuid, _h_uuid, _host):
@ -241,12 +244,10 @@ class AppHandler(object):
old_app = self.db.get_app_info(_stack_id) old_app = self.db.get_app_info(_stack_id)
if old_app is None: if old_app is None:
self.status = "error while getting old_app from MUSIC" LOG.error("Error while getting old_app from MUSIC")
self.logger.error(self.status)
return None return None
elif len(old_app) == 0: elif len(old_app) == 0:
self.status = "cannot find the old app in MUSIC" LOG.error("Cannot find the old app in MUSIC")
self.logger.error(self.status)
return None return None
re_app["action"] = "create" re_app["action"] = "create"

View File

@ -24,7 +24,7 @@ class AppTopology(object):
calculating and setting optimization. calculating and setting optimization.
""" """
def __init__(self, _resource, _logger): def __init__(self, _resource):
"""Init App Topology Class.""" """Init App Topology Class."""
self.vgroups = {} self.vgroups = {}
self.vms = {} self.vms = {}
@ -38,14 +38,13 @@ class AppTopology(object):
self.exclusion_list_map = {} self.exclusion_list_map = {}
self.resource = _resource self.resource = _resource
self.logger = _logger
# restriction of host naming convention # restriction of host naming convention
high_level_allowed = True high_level_allowed = True
if "none" in self.resource.datacenter.region_code_list: if "none" in self.resource.datacenter.region_code_list:
high_level_allowed = False high_level_allowed = False
self.parser = Parser(high_level_allowed, self.logger) self.parser = Parser(high_level_allowed)
self.total_CPU = 0 self.total_CPU = 0
self.total_mem = 0 self.total_mem = 0

View File

@ -24,13 +24,15 @@
OS::Heat::ResourceGroup OS::Heat::ResourceGroup
OS::Heat::ResourceGroup OS::Heat::ResourceGroup
""" """
from oslo_log import log
import six import six
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
from valet.engine.optimizer.app_manager.app_topology_base import VGroup from valet.engine.optimizer.app_manager.app_topology_base import VGroup
from valet.engine.optimizer.app_manager.app_topology_base import VM from valet.engine.optimizer.app_manager.app_topology_base import VM
LOG = log.getLogger(__name__)
class Parser(object): class Parser(object):
"""Parser Class. """Parser Class.
@ -41,10 +43,8 @@ class Parser(object):
OS::Heat::Stack OS::Heat::ResourceGroup OS::Heat::Stack OS::Heat::ResourceGroup
""" """
def __init__(self, _high_level_allowed, _logger): def __init__(self, _high_level_allowed):
"""Init Parser Class.""" """Init Parser Class."""
self.logger = _logger
self.high_level_allowed = _high_level_allowed self.high_level_allowed = _high_level_allowed
self.format_version = None self.format_version = None
@ -109,9 +109,9 @@ class Parser(object):
if len(r["locations"]) > 0: if len(r["locations"]) > 0:
self.candidate_list_map[rk] = r["locations"] self.candidate_list_map[rk] = r["locations"]
vms[vm.uuid] = vm vms[vm.uuid] = vm
self.logger.info("vm = " + vm.uuid) LOG.info("vm = " + vm.uuid)
elif r["type"] == "OS::Cinder::Volume": elif r["type"] == "OS::Cinder::Volume":
self.logger.warn("Parser: do nothing for volume at this " LOG.warning("Parser: do nothing for volume at this "
"version") "version")
elif r["type"] == "ATT::Valet::GroupAssignment": elif r["type"] == "ATT::Valet::GroupAssignment":
@ -154,7 +154,7 @@ class Parser(object):
return {}, {} return {}, {}
vgroups[vgroup.uuid] = vgroup vgroups[vgroup.uuid] = vgroup
msg = "group = %s, type = %s" msg = "group = %s, type = %s"
self.logger.info(msg % (vgroup.name, vgroup.vgroup_type)) LOG.info(msg % (vgroup.name, vgroup.vgroup_type))
if self._merge_diversity_groups(_elements, vgroups, vms) is False: if self._merge_diversity_groups(_elements, vgroups, vms) is False:
return {}, {} return {}, {}

View File

@ -17,9 +17,14 @@
import json import json
import operator import operator
from oslo_log import log
from valet.common.music import Music from valet.common.music import Music
from valet.engine.optimizer.db_connect.event import Event from valet.engine.optimizer.db_connect.event import Event
LOG = log.getLogger(__name__)
def ensurekey(d, k): def ensurekey(d, k):
return d.setdefault(k, {}) return d.setdefault(k, {})
@ -33,20 +38,18 @@ class MusicHandler(object):
database for valet and returns/deletes/updates objects within it. database for valet and returns/deletes/updates objects within it.
""" """
def __init__(self, _config, _logger): def __init__(self, _config):
"""Init Music Handler.""" """Init Music Handler."""
self.config = _config self.config = _config
self.logger = _logger
self.music = Music( self.music = Music(
hosts=self.config.hosts, port=self.config.port, hosts=self.config.hosts, port=self.config.port,
replication_factor=self.config.replication_factor, replication_factor=self.config.replication_factor,
music_server_retries=self.config.music_server_retries, music_server_retries=self.config.music_server_retries)
logger=self.logger)
if self.config.hosts is not None: if self.config.hosts is not None:
self.logger.info("DB: music host = %s", self.config.hosts) LOG.info("DB: music host = %s", self.config.hosts)
if self.config.replication_factor is not None: if self.config.replication_factor is not None:
self.logger.info("DB: music replication factor = %s ", LOG.info("DB: music replication factor = %s ",
str(self.config.replication_factor)) str(self.config.replication_factor))
# FIXME(GJ): this may not need # FIXME(GJ): this may not need
@ -57,12 +60,12 @@ class MusicHandler(object):
necessary tables with the proper schemas in Music using API calls. necessary tables with the proper schemas in Music using API calls.
Return True if no exceptions are caught. Return True if no exceptions are caught.
""" """
self.logger.info("MusicHandler.init_db: create table") LOG.info("MusicHandler.init_db: create table")
try: try:
self.music.create_keyspace(self.config.db_keyspace) self.music.create_keyspace(self.config.db_keyspace)
except Exception as e: except Exception as e:
self.logger.error("DB: " + str(e)) LOG.error("DB could not create keyspace: " + str(e))
return False return False
schema = { schema = {
@ -74,7 +77,7 @@ class MusicHandler(object):
self.music.create_table(self.config.db_keyspace, self.music.create_table(self.config.db_keyspace,
self.config.db_request_table, schema) self.config.db_request_table, schema)
except Exception as e: except Exception as e:
self.logger.error("DB: " + str(e)) LOG.error("DB could not create request table: " + str(e))
return False return False
schema = { schema = {
@ -86,7 +89,7 @@ class MusicHandler(object):
self.music.create_table(self.config.db_keyspace, self.music.create_table(self.config.db_keyspace,
self.config.db_response_table, schema) self.config.db_response_table, schema)
except Exception as e: except Exception as e:
self.logger.error("DB: " + str(e)) LOG.error("DB could not create response table: " + str(e))
return False return False
schema = { schema = {
@ -100,7 +103,7 @@ class MusicHandler(object):
self.music.create_table(self.config.db_keyspace, self.music.create_table(self.config.db_keyspace,
self.config.db_event_table, schema) self.config.db_event_table, schema)
except Exception as e: except Exception as e:
self.logger.error("DB: " + str(e)) LOG.error("DB could not create event table: " + str(e))
return False return False
schema = { schema = {
@ -112,7 +115,7 @@ class MusicHandler(object):
self.music.create_table(self.config.db_keyspace, self.music.create_table(self.config.db_keyspace,
self.config.db_resource_table, schema) self.config.db_resource_table, schema)
except Exception as e: except Exception as e:
self.logger.error("DB: " + str(e)) LOG.error("DB could not create resource table: " + str(e))
return False return False
schema = { schema = {
@ -124,7 +127,7 @@ class MusicHandler(object):
self.music.create_table(self.config.db_keyspace, self.music.create_table(self.config.db_keyspace,
self.config.db_app_table, schema) self.config.db_app_table, schema)
except Exception as e: except Exception as e:
self.logger.error("DB: " + str(e)) LOG.error("DB could not create app table: " + str(e))
return False return False
schema = { schema = {
@ -137,7 +140,7 @@ class MusicHandler(object):
self.music.create_table(self.config.db_keyspace, self.music.create_table(self.config.db_keyspace,
self.config.db_uuid_table, schema) self.config.db_uuid_table, schema)
except Exception as e: except Exception as e:
self.logger.error("DB: " + str(e)) LOG.error("DB could not create uuid table: " + str(e))
return False return False
return True return True
@ -157,7 +160,7 @@ class MusicHandler(object):
events = self.music.read_all_rows(self.config.db_keyspace, events = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_event_table) self.config.db_event_table)
except Exception as e: except Exception as e:
self.logger.error("DB:event: " + str(e)) LOG.error("DB:event: " + str(e))
# FIXME(GJ): return None? # FIXME(GJ): return None?
return {} return {}
@ -168,13 +171,13 @@ class MusicHandler(object):
method = row['method'] method = row['method']
args_data = row['args'] args_data = row['args']
self.logger.debug("MusicHandler.get_events: event (" + LOG.debug("MusicHandler.get_events: event (" +
event_id + ") is entered") event_id + ") is entered")
if exchange != "nova": if exchange != "nova":
if self.delete_event(event_id) is False: if self.delete_event(event_id) is False:
return None return None
self.logger.debug( LOG.debug(
"MusicHandler.get_events: event exchange " "MusicHandler.get_events: event exchange "
"(" + exchange + ") is not supported") "(" + exchange + ") is not supported")
continue continue
@ -183,21 +186,21 @@ class MusicHandler(object):
'instance': 'instance':
if self.delete_event(event_id) is False: if self.delete_event(event_id) is False:
return None return None
self.logger.debug("MusicHandler.get_events: event method " LOG.debug("MusicHandler.get_events: event method "
"(" + method + ") is not considered") "(" + method + ") is not considered")
continue continue
if len(args_data) == 0: if len(args_data) == 0:
if self.delete_event(event_id) is False: if self.delete_event(event_id) is False:
return None return None
self.logger.debug("MusicHandler.get_events: " LOG.debug("MusicHandler.get_events: "
"event does not have args") "event does not have args")
continue continue
try: try:
args = json.loads(args_data) args = json.loads(args_data)
except (ValueError, KeyError, TypeError): except (ValueError, KeyError, TypeError):
self.logger.warn("MusicHandler.get_events: error while " LOG.warning("MusicHandler.get_events: error while "
"decoding to JSON event = " + method + "decoding to JSON event = " + method +
":" + event_id) ":" + event_id)
continue continue
@ -227,11 +230,11 @@ class MusicHandler(object):
event_list.append(e) event_list.append(e)
else: else:
msg = "unknown vm_state = %s" msg = "unknown vm_state = %s"
self.logger.warn( LOG.warning(
msg % change_data["vm_state"]) msg % change_data["vm_state"])
if 'uuid' in change_data.keys(): if 'uuid' in change_data.keys():
msg = " uuid = %s" msg = " uuid = %s"
self.logger.warn( LOG.warning(
msg % change_data['uuid']) msg % change_data['uuid'])
if not self.delete_event(event_id): if not self.delete_event(event_id):
return None return None
@ -296,19 +299,19 @@ class MusicHandler(object):
e.host is None or e.host == "none" or \ e.host is None or e.host == "none" or \
e.vcpus == -1 or e.mem == -1: e.vcpus == -1 or e.mem == -1:
error_event_list.append(e) error_event_list.append(e)
self.logger.warn("MusicHandler.get_events: data " LOG.warning("MusicHandler.get_events: data "
"missing in instance object event") "missing in instance object event")
elif e.object_name == 'ComputeNode': elif e.object_name == 'ComputeNode':
if e.host is None or e.host == "none": if e.host is None or e.host == "none":
error_event_list.append(e) error_event_list.append(e)
self.logger.warn("MusicHandler.get_events: data " LOG.warning("MusicHandler.get_events: data "
"missing in compute object event") "missing in compute object event")
elif e.method == "build_and_run_instance": elif e.method == "build_and_run_instance":
if e.uuid is None or e.uuid == "none": if e.uuid is None or e.uuid == "none":
error_event_list.append(e) error_event_list.append(e)
self.logger.warn("MusicHandler.get_events: data missing " LOG.warning("MusicHandler.get_events: data missing "
"in build event") "in build event")
if len(error_event_list) > 0: if len(error_event_list) > 0:
@ -327,7 +330,7 @@ class MusicHandler(object):
self.config.db_event_table, self.config.db_event_table,
'timestamp', _event_id) 'timestamp', _event_id)
except Exception as e: except Exception as e:
self.logger.error("DB: while deleting event: " + str(e)) LOG.error("DB: while deleting event: " + str(e))
return False return False
return True return True
@ -342,7 +345,7 @@ class MusicHandler(object):
row = self.music.read_row(self.config.db_keyspace, row = self.music.read_row(self.config.db_keyspace,
self.config.db_uuid_table, 'uuid', _uuid) self.config.db_uuid_table, 'uuid', _uuid)
except Exception as e: except Exception as e:
self.logger.error("DB: while reading uuid: " + str(e)) LOG.error("DB: while reading uuid: " + str(e))
return None return None
if len(row) > 0: if len(row) > 0:
@ -376,7 +379,7 @@ class MusicHandler(object):
self.music.create_row(self.config.db_keyspace, self.music.create_row(self.config.db_keyspace,
self.config.db_uuid_table, data) self.config.db_uuid_table, data)
except Exception as e: except Exception as e:
self.logger.error("DB: while inserting uuid: " + str(e)) LOG.error("DB: while inserting uuid: " + str(e))
return False return False
return True return True
@ -388,7 +391,7 @@ class MusicHandler(object):
self.config.db_uuid_table, 'uuid', self.config.db_uuid_table, 'uuid',
_k) _k)
except Exception as e: except Exception as e:
self.logger.error("DB: while deleting uuid: " + str(e)) LOG.error("DB: while deleting uuid: " + str(e))
return False return False
return True return True
@ -402,16 +405,15 @@ class MusicHandler(object):
requests = self.music.read_all_rows(self.config.db_keyspace, requests = self.music.read_all_rows(self.config.db_keyspace,
self.config.db_request_table) self.config.db_request_table)
except Exception as e: except Exception as e:
self.logger.error("DB: while reading requests: " + str(e)) LOG.error("DB: while reading requests: " + str(e))
# FIXME(GJ): return None? # FIXME(GJ): return None?
return {} return {}
if len(requests) > 0: if len(requests) > 0:
self.logger.info("MusicHandler.get_requests: placement request " LOG.info("MusicHandler.get_requests: placement request arrived")
"arrived")
for _, row in requests.iteritems(): for _, row in requests.iteritems():
self.logger.info(" request_id = " + row['stack_id']) LOG.info(" request_id = " + row['stack_id'])
r_list = json.loads(row['request']) r_list = json.loads(row['request'])
for r in r_list: for r in r_list:
@ -431,7 +433,7 @@ class MusicHandler(object):
self.music.create_row(self.config.db_keyspace, self.music.create_row(self.config.db_keyspace,
self.config.db_response_table, data) self.config.db_response_table, data)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while putting placement " LOG.error("MUSIC error while putting placement "
"result: " + str(e)) "result: " + str(e))
return False return False
@ -441,7 +443,7 @@ class MusicHandler(object):
self.config.db_request_table, self.config.db_request_table,
'stack_id', appk) 'stack_id', appk)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while deleting handled " LOG.error("MUSIC error while deleting handled "
"request: " + str(e)) "request: " + str(e))
return False return False
@ -455,9 +457,9 @@ class MusicHandler(object):
try: try:
row = self.music.read_row(self.config.db_keyspace, row = self.music.read_row(self.config.db_keyspace,
self.config.db_resource_table, self.config.db_resource_table,
'site_name', _k, self.logger) 'site_name', _k)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while reading resource status: " + LOG.error("MUSIC error while reading resource status: " +
str(e)) str(e))
return None return None
@ -475,7 +477,7 @@ class MusicHandler(object):
self.config.db_resource_table, self.config.db_resource_table,
'site_name', _k) 'site_name', _k)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while reading resource status: " + LOG.error("MUSIC error while reading resource status: " +
str(e)) str(e))
return False return False
@ -525,7 +527,7 @@ class MusicHandler(object):
self.config.db_resource_table, self.config.db_resource_table,
'site_name', _k) 'site_name', _k)
except Exception as e: except Exception as e:
self.logger.error("MUSIC error while deleting resource " LOG.error("MUSIC error while deleting resource "
"status: " + str(e)) "status: " + str(e))
return False return False
@ -541,10 +543,10 @@ class MusicHandler(object):
self.music.create_row(self.config.db_keyspace, self.music.create_row(self.config.db_keyspace,
self.config.db_resource_table, data) self.config.db_resource_table, data)
except Exception as e: except Exception as e:
self.logger.error("DB: " + str(e)) LOG.error("DB could not create row in resource table: " + str(e))
return False return False
self.logger.info("DB: resource status updated") LOG.info("DB: resource status updated")
return True return True
@ -555,7 +557,7 @@ class MusicHandler(object):
self.config.db_keyspace, self.config.db_app_table, self.config.db_keyspace, self.config.db_app_table,
'stack_id', _k) 'stack_id', _k)
except Exception as e: except Exception as e:
self.logger.error("DB: while deleting app: " + str(e)) LOG.error("DB: while deleting app: " + str(e))
return False return False
if _app_data is not None: if _app_data is not None:
@ -568,7 +570,7 @@ class MusicHandler(object):
self.music.create_row(self.config.db_keyspace, self.music.create_row(self.config.db_keyspace,
self.config.db_app_table, data) self.config.db_app_table, data)
except Exception as e: except Exception as e:
self.logger.error("DB: while inserting app: " + str(e)) LOG.error("DB: while inserting app: " + str(e))
return False return False
return True return True
@ -583,7 +585,7 @@ class MusicHandler(object):
self.config.db_app_table, 'stack_id', self.config.db_app_table, 'stack_id',
_s_uuid) _s_uuid)
except Exception as e: except Exception as e:
self.logger.error("DB: while reading app info: " + str(e)) LOG.error("DB: while reading app info: " + str(e))
return None return None
if len(row) > 0: if len(row) > 0:
@ -606,7 +608,7 @@ class MusicHandler(object):
self.config.db_app_table, 'stack_id', self.config.db_app_table, 'stack_id',
_s_uuid) _s_uuid)
except Exception as e: except Exception as e:
self.logger.error("DB: " + str(e)) LOG.error("DB could not read row in app table: " + str(e))
return None return None
if len(row) > 0: if len(row) > 0:
@ -620,24 +622,24 @@ class MusicHandler(object):
if vm["host"] != _host: if vm["host"] != _host:
vm["planned_host"] = vm["host"] vm["planned_host"] = vm["host"]
vm["host"] = _host vm["host"] = _host
self.logger.warn("db: conflicted placement " LOG.warning("DB: conflicted placement "
"decision from Ostro") "decision from Ostro")
# TODO(GY): affinity, diversity, exclusivity # TODO(GY): affinity, diversity, exclusivity
# validation check # validation check
updated = True updated = True
else: else:
vm["status"] = "scheduled" vm["status"] = "scheduled"
self.logger.warn("DB: vm was deleted") LOG.warning("DB: vm was deleted")
updated = True updated = True
vm_info = vm vm_info = vm
break break
else: else:
self.logger.error("MusicHandler.get_vm_info: vm is missing " LOG.error("MusicHandler.get_vm_info: vm is missing "
"from stack") "from stack")
else: else:
self.logger.warn("MusicHandler.get_vm_info: not found stack for " LOG.warning("MusicHandler.get_vm_info: not found stack for "
"update = " + _s_uuid) "update = " + _s_uuid)
if updated is True: if updated is True:
@ -657,7 +659,7 @@ class MusicHandler(object):
self.config.db_app_table, 'stack_id', self.config.db_app_table, 'stack_id',
_s_uuid) _s_uuid)
except Exception as e: except Exception as e:
self.logger.error("DB: " + str(e)) LOG.error("DB could not read row in app table: " + str(e))
return False return False
if len(row) > 0: if len(row) > 0:
@ -669,17 +671,17 @@ class MusicHandler(object):
if vmk == _h_uuid: if vmk == _h_uuid:
if vm["status"] != "deleted": if vm["status"] != "deleted":
vm["status"] = "deleted" vm["status"] = "deleted"
self.logger.warn("DB: deleted marked") LOG.warning("DB: deleted marked")
updated = True updated = True
else: else:
self.logger.warn("DB: vm was already deleted") LOG.warning("DB: vm was already deleted")
break break
else: else:
self.logger.error("MusicHandler.update_vm_info: vm is missing " LOG.error("MusicHandler.update_vm_info: vm is missing "
"from stack") "from stack")
else: else:
self.logger.warn("MusicHandler.update_vm_info: not found " LOG.warning("MusicHandler.update_vm_info: not found "
"stack for update = " + _s_uuid) "stack for update = " + _s_uuid)
if updated is True: if updated is True:

View File

@ -12,6 +12,7 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from oslo_log import log
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
from valet.engine.optimizer.app_manager.app_topology_base import VGroup from valet.engine.optimizer.app_manager.app_topology_base import VGroup
@ -24,20 +25,21 @@ from valet.engine.optimizer.ostro.openstack_filters import CoreFilter
from valet.engine.optimizer.ostro.openstack_filters import DiskFilter from valet.engine.optimizer.ostro.openstack_filters import DiskFilter
from valet.engine.optimizer.ostro.openstack_filters import RamFilter from valet.engine.optimizer.ostro.openstack_filters import RamFilter
LOG = log.getLogger(__name__)
class ConstraintSolver(object): class ConstraintSolver(object):
"""ConstraintSolver.""" """ConstraintSolver."""
def __init__(self, _logger): def __init__(self):
"""Initialization.""" """Initialization."""
"""Instantiate filters to help enforce constraints.""" """Instantiate filters to help enforce constraints."""
self.logger = _logger
self.openstack_AZ = AvailabilityZoneFilter(self.logger) self.openstack_AZ = AvailabilityZoneFilter()
self.openstack_AIES = AggregateInstanceExtraSpecsFilter(self.logger) self.openstack_AIES = AggregateInstanceExtraSpecsFilter()
self.openstack_R = RamFilter(self.logger) self.openstack_R = RamFilter()
self.openstack_C = CoreFilter(self.logger) self.openstack_C = CoreFilter()
self.openstack_D = DiskFilter(self.logger) self.openstack_D = DiskFilter()
self.status = "success" self.status = "success"
@ -57,10 +59,10 @@ class ConstraintSolver(object):
candidate_list.append(r) candidate_list.append(r)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "no candidate for node = " + _n.node.name self.status = "no candidate for node = " + _n.node.name
self.logger.warn(self.status) LOG.warning(self.status)
return candidate_list return candidate_list
else: else:
self.logger.debug("ConstraintSolver: num of candidates = " + LOG.debug("ConstraintSolver: num of candidates = " +
str(len(candidate_list))) str(len(candidate_list)))
"""Availability zone constraint.""" """Availability zone constraint."""
@ -72,7 +74,7 @@ class ConstraintSolver(object):
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate availability zone constraint for " \ self.status = "violate availability zone constraint for " \
"node = " + _n.node.name "node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) LOG.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
"""Host aggregate constraint.""" """Host aggregate constraint."""
@ -82,7 +84,7 @@ class ConstraintSolver(object):
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate host aggregate constraint for " \ self.status = "violate host aggregate constraint for " \
"node = " + _n.node.name "node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) LOG.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
"""CPU capacity constraint.""" """CPU capacity constraint."""
@ -91,7 +93,7 @@ class ConstraintSolver(object):
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate cpu capacity constraint for " \ self.status = "violate cpu capacity constraint for " \
"node = " + _n.node.name "node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) LOG.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
"""Memory capacity constraint.""" """Memory capacity constraint."""
@ -100,7 +102,7 @@ class ConstraintSolver(object):
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate memory capacity constraint for " \ self.status = "violate memory capacity constraint for " \
"node = " + _n.node.name "node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) LOG.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
"""Local disk capacity constraint.""" """Local disk capacity constraint."""
@ -109,7 +111,7 @@ class ConstraintSolver(object):
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate local disk capacity constraint for " \ self.status = "violate local disk capacity constraint for " \
"node = " + _n.node.name "node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) LOG.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
""" diversity constraint """ """ diversity constraint """
@ -125,7 +127,7 @@ class ConstraintSolver(object):
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate diversity constraint for " \ self.status = "violate diversity constraint for " \
"node = " + _n.node.name "node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) LOG.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self._constrain_diversity(_level, _n, _node_placements, self._constrain_diversity(_level, _n, _node_placements,
@ -133,7 +135,7 @@ class ConstraintSolver(object):
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate diversity constraint for " \ self.status = "violate diversity constraint for " \
"node = " + _n.node.name "node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) LOG.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
"""Exclusivity constraint.""" """Exclusivity constraint."""
@ -142,7 +144,7 @@ class ConstraintSolver(object):
if len(exclusivities) > 1: if len(exclusivities) > 1:
self.status = "violate exclusivity constraint (more than one " \ self.status = "violate exclusivity constraint (more than one " \
"exclusivity) for node = " + _n.node.name "exclusivity) for node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) LOG.error("ConstraintSolver: " + self.status)
return [] return []
else: else:
if len(exclusivities) == 1: if len(exclusivities) == 1:
@ -153,14 +155,14 @@ class ConstraintSolver(object):
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate exclusivity constraint for " \ self.status = "violate exclusivity constraint for " \
"node = " + _n.node.name "node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) LOG.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
else: else:
self._constrain_non_exclusivity(_level, candidate_list) self._constrain_non_exclusivity(_level, candidate_list)
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate non-exclusivity constraint for " \ self.status = "violate non-exclusivity constraint for " \
"node = " + _n.node.name "node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) LOG.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
"""Affinity constraint.""" """Affinity constraint."""
@ -173,7 +175,7 @@ class ConstraintSolver(object):
if len(candidate_list) == 0: if len(candidate_list) == 0:
self.status = "violate affinity constraint for " \ self.status = "violate affinity constraint for " \
"node = " + _n.node.name "node = " + _n.node.name
self.logger.error("ConstraintSolver: " + self.status) LOG.error("ConstraintSolver: " + self.status)
return candidate_list return candidate_list
return candidate_list return candidate_list

View File

@ -27,9 +27,8 @@ class AggregateInstanceExtraSpecsFilter(object):
# Aggregate data and instance type does not change within a request # Aggregate data and instance type does not change within a request
run_filter_once_per_request = True run_filter_once_per_request = True
def __init__(self, _logger): def __init__(self):
"""Initialization.""" """Initialization."""
self.logger = _logger
def host_passes(self, _level, _host, _v): def host_passes(self, _level, _host, _v):
"""Return a list of hosts that can create instance_type.""" """Return a list of hosts that can create instance_type."""
@ -109,9 +108,8 @@ class AvailabilityZoneFilter(object):
# Availability zones do not change within a request # Availability zones do not change within a request
run_filter_once_per_request = True run_filter_once_per_request = True
def __init__(self, _logger): def __init__(self):
"""Initialization.""" """Initialization."""
self.logger = _logger
def host_passes(self, _level, _host, _v): def host_passes(self, _level, _host, _v):
"""Return True if all availalibility zones in _v exist in the host.""" """Return True if all availalibility zones in _v exist in the host."""
@ -138,9 +136,8 @@ class AvailabilityZoneFilter(object):
class RamFilter(object): class RamFilter(object):
"""RamFilter.""" """RamFilter."""
def __init__(self, _logger): def __init__(self):
"""Initialization.""" """Initialization."""
self.logger = _logger
def host_passes(self, _level, _host, _v): def host_passes(self, _level, _host, _v):
"""Return True if host has sufficient available RAM.""" """Return True if host has sufficient available RAM."""
@ -161,9 +158,8 @@ class RamFilter(object):
class CoreFilter(object): class CoreFilter(object):
"""CoreFilter.""" """CoreFilter."""
def __init__(self, _logger): def __init__(self):
"""Initialization.""" """Initialization."""
self.logger = _logger
def host_passes(self, _level, _host, _v): def host_passes(self, _level, _host, _v):
"""Return True if host has sufficient CPU cores.""" """Return True if host has sufficient CPU cores."""
@ -185,9 +181,8 @@ class CoreFilter(object):
class DiskFilter(object): class DiskFilter(object):
"""DiskFilter.""" """DiskFilter."""
def __init__(self, _logger): def __init__(self):
"""Initialization.""" """Initialization."""
self.logger = _logger
def host_passes(self, _level, _host, _v): def host_passes(self, _level, _host, _v):
"""Filter based on disk usage.""" """Filter based on disk usage."""

View File

@ -12,22 +12,25 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
from oslo_log import log
from valet.engine.optimizer.app_manager.app_topology_base import VGroup from valet.engine.optimizer.app_manager.app_topology_base import VGroup
from valet.engine.optimizer.app_manager.app_topology_base import VM from valet.engine.optimizer.app_manager.app_topology_base import VM
from valet.engine.optimizer.ostro.search import Search from valet.engine.optimizer.ostro.search import Search
LOG = log.getLogger(__name__)
# FIXME(GJ): make search algorithm pluggable # FIXME(GJ): make search algorithm pluggable
# NOTE(GJ): do not deal with Volume placements at this version # NOTE(GJ): do not deal with Volume placements at this version
class Optimizer(object): class Optimizer(object):
"""Optimizer.""" """Optimizer."""
def __init__(self, _resource, _logger): def __init__(self, _resource):
"""Initialization.""" """Initialization."""
self.resource = _resource self.resource = _resource
self.logger = _logger
self.search = Search(self.logger) self.search = Search()
self.status = "success" self.status = "success"
@ -80,8 +83,7 @@ class Optimizer(object):
elif v.level == "cluster": elif v.level == "cluster":
placement_map[v] = node_placement.cluster_name placement_map[v] = node_placement.cluster_name
self.logger.debug(" " + v.name + " placed in " + LOG.debug(v.name + " placed in " + placement_map[v])
placement_map[v])
self._update_resource_status(uuid_map) self._update_resource_status(uuid_map)

View File

@ -12,13 +12,13 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Valet Engine."""
from oslo_config import cfg
import threading import threading
import time import time
import traceback import traceback
from oslo_config import cfg
from oslo_log import log
from valet.engine.listener.listener_manager import ListenerManager from valet.engine.listener.listener_manager import ListenerManager
from valet.engine.optimizer.app_manager.app_handler import AppHandler from valet.engine.optimizer.app_manager.app_handler import AppHandler
from valet.engine.optimizer.app_manager.app_topology_base import VM from valet.engine.optimizer.app_manager.app_topology_base import VM
@ -29,37 +29,34 @@ from valet.engine.resource_manager.resource import Resource
from valet.engine.resource_manager.topology_manager import TopologyManager from valet.engine.resource_manager.topology_manager import TopologyManager
CONF = cfg.CONF CONF = cfg.CONF
LOG = log.getLogger(__name__)
class Ostro(object): class Ostro(object):
"""Valet Engine.""" """Valet Engine."""
def __init__(self, _config, _logger): def __init__(self, _config):
"""Initialization.""" """Initialization."""
self.config = _config self.config = _config
self.logger = _logger
self.db = MusicHandler(self.config, self.logger) self.db = MusicHandler(self.config)
if self.db.init_db() is False: if self.db.init_db() is False:
self.logger.error("error while initializing MUSIC database") LOG.error("error while initializing MUSIC database")
self.resource = Resource(self.db, self.config, self.logger) self.resource = Resource(self.db, self.config)
self.app_handler = AppHandler(self.resource, self.db, self.config)
self.app_handler = AppHandler(self.resource, self.db, self.config, self.optimizer = Optimizer(self.resource)
self.logger)
self.optimizer = Optimizer(self.resource, self.logger)
self.data_lock = threading.Lock() self.data_lock = threading.Lock()
self.thread_list = [] self.thread_list = []
self.topology = TopologyManager( self.topology = TopologyManager(
1, "Topology", self.resource, 1, "Topology", self.resource,
self.data_lock, self.config, self.logger) self.data_lock, self.config)
self.compute = ComputeManager( self.compute = ComputeManager(
2, "Compute", self.resource, 2, "Compute", self.resource,
self.data_lock, self.config, self.logger) self.data_lock, self.config)
self.listener = ListenerManager(3, "Listener", CONF) self.listener = ListenerManager(3, "Listener", CONF)
@ -69,7 +66,7 @@ class Ostro(object):
self.batch_store_trigger = 10 # sec self.batch_store_trigger = 10 # sec
def run_ostro(self): def run_ostro(self):
self.logger.info("start Ostro ......") LOG.info("start Ostro ......")
self.topology.start() self.topology.start()
self.compute.start() self.compute.start()
@ -114,7 +111,7 @@ class Ostro(object):
for t in self.thread_list: for t in self.thread_list:
t.join() t.join()
self.logger.info("exit Ostro") LOG.info("exit Ostro")
def stop_ostro(self): def stop_ostro(self):
"""Stop main engine process.""" """Stop main engine process."""
@ -131,22 +128,22 @@ class Ostro(object):
def bootstrap(self): def bootstrap(self):
"""Start bootstrap and update the engine's resource topology.""" """Start bootstrap and update the engine's resource topology."""
self.logger.info("Ostro.bootstrap: start bootstrap") LOG.info("Ostro.bootstrap: start bootstrap")
try: try:
resource_status = self.db.get_resource_status( resource_status = self.db.get_resource_status(
self.resource.datacenter.name) self.resource.datacenter.name)
if resource_status is None: if resource_status is None:
self.logger.error("failed to read from table: %s" % LOG.error("failed to read from table: %s" %
self.config.db_resource_table) self.config.db_resource_table)
return False return False
if len(resource_status) > 0: if len(resource_status) > 0:
self.logger.info("bootstrap from DB") LOG.info("bootstrap from DB")
if not self.resource.bootstrap_from_db(resource_status): if not self.resource.bootstrap_from_db(resource_status):
self.logger.error("failed to parse bootstrap data!") LOG.error("failed to parse bootstrap data!")
self.logger.info("bootstrap from OpenStack") LOG.info("bootstrap from OpenStack")
if not self._set_hosts(): if not self._set_hosts():
return False return False
@ -159,42 +156,42 @@ class Ostro(object):
self.resource.update_topology() self.resource.update_topology()
except Exception: except Exception:
self.logger.critical("Ostro.bootstrap failed: %s" % LOG.critical("Ostro.bootstrap failed: ",
traceback.format_exc()) traceback.format_exc())
self.logger.info("done bootstrap") LOG.info("done bootstrap")
return True return True
def _set_topology(self): def _set_topology(self):
if not self.topology.set_topology(): if not self.topology.set_topology():
self.logger.error("failed to read datacenter topology") LOG.error("failed to read datacenter topology")
return False return False
self.logger.info("done topology bootstrap") LOG.info("done topology bootstrap")
return True return True
def _set_hosts(self): def _set_hosts(self):
if not self.compute.set_hosts(): if not self.compute.set_hosts():
self.logger.error("failed to read hosts from OpenStack (Nova)") LOG.error("failed to read hosts from OpenStack (Nova)")
return False return False
self.logger.info("done hosts & groups bootstrap") LOG.info("done hosts & groups bootstrap")
return True return True
def _set_flavors(self): def _set_flavors(self):
if not self.compute.set_flavors(): if not self.compute.set_flavors():
self.logger.error("failed to read flavors from OpenStack (Nova)") LOG.error("failed to read flavors from OpenStack (Nova)")
return False return False
self.logger.info("done flavors bootstrap") LOG.info("done flavors bootstrap")
return True return True
# TODO(GJ): evaluate delay # TODO(GJ): evaluate delay
def place_app(self, _app_data): def place_app(self, _app_data):
for req in _app_data: for req in _app_data:
if req["action"] == "query": if req["action"] == "query":
self.logger.info("start query") LOG.info("start query")
query_result = self._query(req) query_result = self._query(req)
result = self._get_json_results("query", "ok", result = self._get_json_results("query", "ok",
@ -203,9 +200,9 @@ class Ostro(object):
if not self.db.put_result(result): if not self.db.put_result(result):
return False return False
self.logger.info("done query") LOG.info("done query")
else: else:
self.logger.info("start app placement") LOG.info("start app placement")
result = None result = None
(decision_key, old_decision) = self.app_handler.check_history( (decision_key, old_decision) = self.app_handler.check_history(
@ -221,14 +218,13 @@ class Ostro(object):
if decision_key is not None: if decision_key is not None:
self.app_handler.put_history(decision_key, result) self.app_handler.put_history(decision_key, result)
else: else:
self.logger.warn("decision(%s) already made" % LOG.info("decision(%s) already made" % decision_key)
decision_key)
result = old_decision result = old_decision
if not self.db.put_result(result): if not self.db.put_result(result):
return False return False
self.logger.info("done app placement") LOG.info("done app placement")
return True return True
@ -247,11 +243,11 @@ class Ostro(object):
query_result[_q["stack_id"]] = vm_list query_result[_q["stack_id"]] = vm_list
else: else:
self.status = "unknown paramenter in query" self.status = "unknown paramenter in query"
self.logger.warn("unknown paramenter in query") LOG.warning("unknown paramenter in query")
query_result[_q["stack_id"]] = None query_result[_q["stack_id"]] = None
else: else:
self.status = "no paramenter in query" self.status = "no paramenter in query"
self.logger.warn("no parameters in query") LOG.warning("no parameters in query")
query_result[_q["stack_id"]] = None query_result[_q["stack_id"]] = None
elif _q["type"] == "all_groups": elif _q["type"] == "all_groups":
self.data_lock.acquire() self.data_lock.acquire()
@ -259,11 +255,11 @@ class Ostro(object):
self.data_lock.release() self.data_lock.release()
else: else:
self.status = "unknown query type" self.status = "unknown query type"
self.logger.warn("unknown query type") LOG.warning("unknown query type")
query_result[_q["stack_id"]] = None query_result[_q["stack_id"]] = None
else: else:
self.status = "unknown type in query" self.status = "unknown type in query"
self.logger.warn("no type in query") LOG.warning("no type in query")
query_result[_q["stack_id"]] = None query_result[_q["stack_id"]] = None
return query_result return query_result
@ -284,7 +280,7 @@ class Ostro(object):
if vm_id[2] != "none": # if physical_uuid != 'none' if vm_id[2] != "none": # if physical_uuid != 'none'
vm_list.append(vm_id[2]) vm_list.append(vm_id[2])
else: else:
self.logger.warn("found pending vms in this group while query") LOG.warning("found pending vms in this group while query")
return vm_list return vm_list
@ -301,7 +297,7 @@ class Ostro(object):
app_topology = self.app_handler.add_app(_app) app_topology = self.app_handler.add_app(_app)
if app_topology is None: if app_topology is None:
self.status = self.app_handler.status self.status = self.app_handler.status
self.logger.error("Ostro._place_app: error while register" LOG.error("Ostro._place_app: error while register"
"requested apps: " + self.app_handler.status) "requested apps: " + self.app_handler.status)
return None return None
@ -309,12 +305,12 @@ class Ostro(object):
for _, vm in app_topology.vms.iteritems(): for _, vm in app_topology.vms.iteritems():
if self._set_vm_flavor_information(vm) is False: if self._set_vm_flavor_information(vm) is False:
self.status = "fail to set flavor information" self.status = "fail to set flavor information"
self.logger.error(self.status) LOG.error(self.status)
return None return None
for _, vg in app_topology.vgroups.iteritems(): for _, vg in app_topology.vgroups.iteritems():
if self._set_vm_flavor_information(vg) is False: if self._set_vm_flavor_information(vg) is False:
self.status = "fail to set flavor information in a group" self.status = "fail to set flavor information in a group"
self.logger.error(self.status) LOG.error(self.status)
return None return None
self.data_lock.acquire() self.data_lock.acquire()
@ -359,7 +355,7 @@ class Ostro(object):
flavor = self.resource.get_flavor(_vm.flavor) flavor = self.resource.get_flavor(_vm.flavor)
if flavor is None: if flavor is None:
self.logger.warn("Ostro._set_vm_flavor_properties: does not exist " LOG.warning("Ostro._set_vm_flavor_properties: does not exist "
"flavor (" + _vm.flavor + ") and try to refetch") "flavor (" + _vm.flavor + ") and try to refetch")
# Reset flavor resource and try again # Reset flavor resource and try again
@ -395,13 +391,13 @@ class Ostro(object):
for e in _event_list: for e in _event_list:
if e.host is not None and e.host != "none": if e.host is not None and e.host != "none":
if self._check_host(e.host) is False: if self._check_host(e.host) is False:
self.logger.warn("Ostro.handle_events: host (" + e.host + LOG.warning("Ostro.handle_events: host (" + e.host +
") related to this event not exists") ") related to this event not exists")
continue continue
if e.method == "build_and_run_instance": if e.method == "build_and_run_instance":
# VM is created (from stack) # VM is created (from stack)
self.logger.info("Ostro.handle_events: got build_and_run " LOG.info("Ostro.handle_events: got build_and_run "
"event for %s" % e.uuid) "event for %s" % e.uuid)
if self.db.put_uuid(e) is False: if self.db.put_uuid(e) is False:
self.data_lock.release() self.data_lock.release()
@ -417,12 +413,12 @@ class Ostro(object):
return False return False
if e.vm_state == "active": if e.vm_state == "active":
self.logger.info("Ostro.handle_events: got instance_" LOG.info("Ostro.handle_events: got instance_"
"active event for " + e.uuid) "active event for " + e.uuid)
vm_info = self.app_handler.get_vm_info( vm_info = self.app_handler.get_vm_info(
orch_id[1], orch_id[0], e.host) orch_id[1], orch_id[0], e.host)
if vm_info is None: if vm_info is None:
self.logger.error("Ostro.handle_events: error " LOG.error("Ostro.handle_events: error "
"while getting app info " "while getting app info "
"from MUSIC") "from MUSIC")
self.data_lock.release() self.data_lock.release()
@ -431,7 +427,7 @@ class Ostro(object):
if len(vm_info) == 0: if len(vm_info) == 0:
# Stack not found because vm is created by the # Stack not found because vm is created by the
# other stack # other stack
self.logger.warn("EVENT: no vm_info found in app " LOG.warning("EVENT: no vm_info found in app "
"placement record") "placement record")
self._add_vm_to_host( self._add_vm_to_host(
e.uuid, orch_id[0], e.host, e.vcpus, e.uuid, orch_id[0], e.host, e.vcpus,
@ -440,7 +436,7 @@ class Ostro(object):
if ("planned_host" in vm_info.keys() and if ("planned_host" in vm_info.keys() and
vm_info["planned_host"] != e.host): vm_info["planned_host"] != e.host):
# VM is activated in the different host # VM is activated in the different host
self.logger.warn("EVENT: vm activated in the " LOG.warning("EVENT: vm activated in the "
"different host") "different host")
self._add_vm_to_host( self._add_vm_to_host(
e.uuid, orch_id[0], e.host, e.vcpus, e.uuid, orch_id[0], e.host, e.vcpus,
@ -461,7 +457,7 @@ class Ostro(object):
# Possibly the vm deleted in the host while # Possibly the vm deleted in the host while
# batch cleanup # batch cleanup
if not self._check_h_uuid(orch_id[0], e.host): if not self._check_h_uuid(orch_id[0], e.host):
self.logger.warn("EVENT: planned vm was " LOG.warning("EVENT: planned vm was "
"deleted") "deleted")
if self._check_uuid(e.uuid, e.host): if self._check_uuid(e.uuid, e.host):
self._update_h_uuid_in_host(orch_id[0], self._update_h_uuid_in_host(orch_id[0],
@ -470,7 +466,7 @@ class Ostro(object):
self._update_h_uuid_in_logical_groups( self._update_h_uuid_in_logical_groups(
orch_id[0], e.uuid, e.host) orch_id[0], e.uuid, e.host)
else: else:
self.logger.info( LOG.info(
"EVENT: vm activated as planned") "EVENT: vm activated as planned")
self._update_uuid_in_host( self._update_uuid_in_host(
orch_id[0], e.uuid, e.host) orch_id[0], e.uuid, e.host)
@ -480,7 +476,7 @@ class Ostro(object):
resource_updated = True resource_updated = True
elif e.vm_state == "deleted": elif e.vm_state == "deleted":
self.logger.info("EVENT: got instance_delete for %s" % LOG.info("EVENT: got instance_delete for %s" %
e.uuid) e.uuid)
self._remove_vm_from_host( self._remove_vm_from_host(
@ -491,7 +487,7 @@ class Ostro(object):
if not self.app_handler.update_vm_info( if not self.app_handler.update_vm_info(
orch_id[1], orch_id[0]): orch_id[1], orch_id[0]):
self.logger.error("EVENT: error while updating " LOG.error("EVENT: error while updating "
"app in MUSIC") "app in MUSIC")
self.data_lock.release() self.data_lock.release()
return False return False
@ -499,16 +495,16 @@ class Ostro(object):
resource_updated = True resource_updated = True
else: else:
self.logger.warn("Ostro.handle_events: unknown vm_" LOG.warning("Ostro.handle_events: unknown vm_"
"state = " + e.vm_state) "state = " + e.vm_state)
elif e.object_name == 'ComputeNode': elif e.object_name == 'ComputeNode':
# Host resource is updated # Host resource is updated
self.logger.debug("Ostro.handle_events: got compute event") LOG.debug("Ostro.handle_events: got compute event")
elif e.object_name == 'ComputeNode': elif e.object_name == 'ComputeNode':
# Host resource is updated # Host resource is updated
self.logger.info("EVENT: got compute for " + e.host) LOG.info("EVENT: got compute for " + e.host)
# NOTE: what if host is disabled? # NOTE: what if host is disabled?
if self.resource.update_host_resources( if self.resource.update_host_resources(
e.host, e.status, e.vcpus, e.vcpus_used, e.mem, e.host, e.status, e.vcpus, e.vcpus_used, e.mem,
@ -519,10 +515,10 @@ class Ostro(object):
resource_updated = True resource_updated = True
else: else:
self.logger.warn("Ostro.handle_events: unknown object_" LOG.warning("Ostro.handle_events: unknown object_"
"name = " + e.object_name) "name = " + e.object_name)
else: else:
self.logger.warn("Ostro.handle_events: unknown event " LOG.warning("Ostro.handle_events: unknown event "
"method = " + e.method) "method = " + e.method)
if resource_updated is True: if resource_updated is True:
@ -577,7 +573,7 @@ class Ostro(object):
_local_disk) _local_disk)
self.resource.update_host_time(_host_name) self.resource.update_host_time(_host_name)
else: else:
self.logger.warn("vm (%s) is missing while removing" % _uuid) LOG.warning("vm (%s) is missing while removing" % _uuid)
def _remove_vm_from_logical_groups(self, _uuid, _h_uuid, _host_name): def _remove_vm_from_logical_groups(self, _uuid, _h_uuid, _host_name):
host = self.resource.hosts[_host_name] host = self.resource.hosts[_host_name]
@ -618,7 +614,7 @@ class Ostro(object):
if host.update_uuid(_h_uuid, _uuid) is True: if host.update_uuid(_h_uuid, _uuid) is True:
self.resource.update_host_time(_host_name) self.resource.update_host_time(_host_name)
else: else:
self.logger.warn("Ostro._update_uuid_in_host: fail to update uuid " LOG.warning("Ostro._update_uuid_in_host: fail to update uuid "
"in host = %s" % host.name) "in host = %s" % host.name)
def _update_h_uuid_in_host(self, _h_uuid, _uuid, _host_name): def _update_h_uuid_in_host(self, _h_uuid, _uuid, _host_name):

View File

@ -15,6 +15,8 @@
import copy import copy
import operator import operator
from oslo_log import log
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
from valet.engine.optimizer.app_manager.app_topology_base import VGroup from valet.engine.optimizer.app_manager.app_topology_base import VGroup
from valet.engine.optimizer.app_manager.app_topology_base import VM from valet.engine.optimizer.app_manager.app_topology_base import VM
@ -24,13 +26,14 @@ from valet.engine.optimizer.ostro.search_base import Node
from valet.engine.optimizer.ostro.search_base import Resource from valet.engine.optimizer.ostro.search_base import Resource
from valet.engine.resource_manager.resource_base import Datacenter from valet.engine.resource_manager.resource_base import Datacenter
LOG = log.getLogger(__name__)
class Search(object): class Search(object):
'''A bin-packing with maximal consolidation approach ''' '''A bin-packing with maximal consolidation approach '''
def __init__(self, _logger): def __init__(self):
"""Initialization.""" """Initialization."""
self.logger = _logger
# search inputs # search inputs
self.resource = None self.resource = None
@ -92,9 +95,9 @@ class Search(object):
self.resource = _resource self.resource = _resource
self.constraint_solver = ConstraintSolver(self.logger) self.constraint_solver = ConstraintSolver()
self.logger.info("start search") LOG.info("start search")
self._create_avail_logical_groups() self._create_avail_logical_groups()
self._create_avail_hosts() self._create_avail_hosts()
@ -118,9 +121,9 @@ class Search(object):
self.app_topology = _app_topology self.app_topology = _app_topology
self.resource = _resource self.resource = _resource
self.constraint_solver = ConstraintSolver(self.logger) self.constraint_solver = ConstraintSolver()
self.logger.info("start search for replan") LOG.info("start search for replan")
self._create_avail_logical_groups() self._create_avail_logical_groups()
self._create_avail_hosts() self._create_avail_hosts()
@ -130,7 +133,7 @@ class Search(object):
self._compute_resource_weights() self._compute_resource_weights()
self.logger.info("first, place already-planned nodes") LOG.info("first, place already-planned nodes")
# reconsider all vms to be migrated together # reconsider all vms to be migrated together
if len(_app_topology.exclusion_list_map) > 0: if len(_app_topology.exclusion_list_map) > 0:
@ -138,17 +141,17 @@ class Search(object):
if self._place_planned_nodes() is False: if self._place_planned_nodes() is False:
self.status = "cannot replan VMs that was planned" self.status = "cannot replan VMs that was planned"
self.logger.error(self.status) LOG.error(self.status)
return False return False
self.logger.info("second, re-place not-planned nodes") LOG.info("second, re-place not-planned nodes")
init_level = LEVELS[len(LEVELS) - 1] init_level = LEVELS[len(LEVELS) - 1]
(open_node_list, level) = self._open_list(self.app_topology.vms, (open_node_list, level) = self._open_list(self.app_topology.vms,
self.app_topology.vgroups, self.app_topology.vgroups,
init_level) init_level)
if open_node_list is None: if open_node_list is None:
self.logger.error("fail to replan") LOG.error("fail to replan")
return False return False
for v, ah in self.planned_placements.iteritems(): for v, ah in self.planned_placements.iteritems():
@ -169,7 +172,7 @@ class Search(object):
if vk in self.app_topology.planned_vm_map.keys(): if vk in self.app_topology.planned_vm_map.keys():
del self.app_topology.planned_vm_map[vk] del self.app_topology.planned_vm_map[vk]
else: else:
self.logger.error("Search: migrated " + migrated_vm_id + LOG.error("Search: migrated " + migrated_vm_id +
" is missing while replan") " is missing while replan")
def _get_child_vms(self, _g, _vm_list, _e_vmk): def _get_child_vms(self, _g, _vm_list, _e_vmk):
@ -212,7 +215,7 @@ class Search(object):
vgroup.host = [] vgroup.host = []
host_name = self._get_host_of_vgroup(hk, vgroup.level) host_name = self._get_host_of_vgroup(hk, vgroup.level)
if host_name is None: if host_name is None:
self.logger.error("Search: host does not exist while " LOG.warning("Search: host does not exist while "
"replan with vgroup") "replan with vgroup")
else: else:
if host_name not in vgroup.host: if host_name not in vgroup.host:
@ -301,7 +304,7 @@ class Search(object):
self._deduct_reservation(_level, best_resource, n) self._deduct_reservation(_level, best_resource, n)
self._close_planned_placement(_level, best_resource, n.node) self._close_planned_placement(_level, best_resource, n.node)
else: else:
self.logger.error("fail to place already-planned VMs") LOG.error("fail to place already-planned VMs")
return False return False
return True return True
@ -332,7 +335,7 @@ class Search(object):
host_name = self._get_host_of_level(_n, _level) host_name = self._get_host_of_level(_n, _level)
if host_name is None: if host_name is None:
self.logger.warn("cannot find host while replanning") LOG.warning("cannot find host while replanning")
return None return None
avail_hosts = {} avail_hosts = {}
@ -385,7 +388,7 @@ class Search(object):
for hk, host in self.resource.hosts.iteritems(): for hk, host in self.resource.hosts.iteritems():
if host.check_availability() is False: if host.check_availability() is False:
self.logger.debug("Search: host (" + host.name + LOG.debug("Search: host (" + host.name +
") not available at this time") ") not available at this time")
continue continue
@ -460,7 +463,7 @@ class Search(object):
for lgk, lg in self.resource.logical_groups.iteritems(): for lgk, lg in self.resource.logical_groups.iteritems():
if lg.status != "enabled": if lg.status != "enabled":
self.logger.warn("group (" + lg.name + ") disabled") LOG.warning("group (" + lg.name + ") disabled")
continue continue
lgr = LogicalGroupResource() lgr = LogicalGroupResource()
@ -715,7 +718,7 @@ class Search(object):
if host_name not in host_list: if host_name not in host_list:
host_list.append(host_name) host_list.append(host_name)
else: else:
self.logger.warn("Search: cannot find candidate " LOG.warning("Search: cannot find candidate "
"host while replanning") "host while replanning")
_n.node.host = host_list _n.node.host = host_list
@ -779,7 +782,7 @@ class Search(object):
else: else:
debug_candidate_name = cr.get_resource_name(_level) debug_candidate_name = cr.get_resource_name(_level)
msg = "rollback of candidate resource = {0}" msg = "rollback of candidate resource = {0}"
self.logger.warn(msg.format(debug_candidate_name)) LOG.warning(msg.format(debug_candidate_name))
if planned_host is None: if planned_host is None:
# recursively rollback deductions of all # recursively rollback deductions of all
@ -792,7 +795,7 @@ class Search(object):
if best_resource is None and len(candidate_list) == 0: if best_resource is None and len(candidate_list) == 0:
self.status = "no available hosts" self.status = "no available hosts"
self.logger.warn(self.status) LOG.warning(self.status)
return best_resource return best_resource
@ -858,7 +861,7 @@ class Search(object):
lgr.group_type = "EX" lgr.group_type = "EX"
self.avail_logical_groups[lgr.name] = lgr self.avail_logical_groups[lgr.name] = lgr
self.logger.info( LOG.info(
"Search: add new exclusivity (%s)" % _exclusivity_id) "Search: add new exclusivity (%s)" % _exclusivity_id)
else: else:
@ -913,7 +916,7 @@ class Search(object):
lgr.group_type = "AFF" lgr.group_type = "AFF"
self.avail_logical_groups[lgr.name] = lgr self.avail_logical_groups[lgr.name] = lgr
self.logger.info("add new affinity (" + _affinity_id + ")") LOG.info("add new affinity (" + _affinity_id + ")")
else: else:
lgr = self.avail_logical_groups[_affinity_id] lgr = self.avail_logical_groups[_affinity_id]
@ -963,8 +966,8 @@ class Search(object):
lgr.group_type = "DIV" lgr.group_type = "DIV"
self.avail_logical_groups[lgr.name] = lgr self.avail_logical_groups[lgr.name] = lgr
self.logger.info( LOG.info(
"Search: add new diversity (%s)" % _diversity_id) "Search: add new diversity (%s)", _diversity_id)
else: else:
lgr = self.avail_logical_groups[_diversity_id] lgr = self.avail_logical_groups[_diversity_id]

View File

@ -17,12 +17,15 @@
import atexit import atexit
import os import os
from oslo_config import cfg import signal
from signal import SIGTERM
import sys import sys
import time import time
from oslo_config import cfg
from oslo_log import log
CONF = cfg.CONF CONF = cfg.CONF
LOG = log.getLogger(__name__)
class Daemon(object): class Daemon(object):
@ -31,7 +34,7 @@ class Daemon(object):
"""Usage: subclass the Daemon class and override the run() method """Usage: subclass the Daemon class and override the run() method
""" """
def __init__(self, priority, pidfile, logger, stdin='/dev/null', def __init__(self, priority, pidfile, stdin='/dev/null',
stdout='/dev/null', stderr='/dev/null'): stdout='/dev/null', stderr='/dev/null'):
"""Initialization.""" """Initialization."""
self.stdin = stdin self.stdin = stdin
@ -39,7 +42,6 @@ class Daemon(object):
self.stderr = stderr self.stderr = stderr
self.pidfile = pidfile self.pidfile = pidfile
self.priority = priority self.priority = priority
self.logger = logger
def daemonize(self): def daemonize(self):
"""Do the UNIX double-fork magic.""" """Do the UNIX double-fork magic."""
@ -53,9 +55,9 @@ class Daemon(object):
# exit first parent # exit first parent
sys.exit(0) sys.exit(0)
except OSError as e: except OSError as e:
self.logger.error("Daemon error at step1: " + e.strerror) LOG.error("Daemon error at step1: ", e.strerror)
sys.stderr.write("fork #1 failed: %d (%s)\n" % LOG.error("fork #1 failed: %d (%s)\n",
(e.errno, e.strerror)) e.errno, e.strerror)
sys.exit(1) sys.exit(1)
# decouple from parent environment # decouple from parent environment
@ -70,9 +72,9 @@ class Daemon(object):
# exit from second parent # exit from second parent
sys.exit(0) sys.exit(0)
except OSError as e: except OSError as e:
self.logger.error("Daemon error at step2: " + e.strerror) LOG.error("Daemon error at step2: ", e.strerror)
sys.stderr.write("fork #2 failed: %d (%s)\n" % LOG.error("fork #2 failed: %d (%s)\n",
(e.errno, e.strerror)) e.errno, e.strerror)
sys.exit(1) sys.exit(1)
# redirect standard file descriptors # redirect standard file descriptors
@ -140,7 +142,7 @@ class Daemon(object):
# Try killing the daemon process # Try killing the daemon process
try: try:
while 1: while 1:
os.kill(pid, SIGTERM) os.kill(pid, signal.SIGTERM)
time.sleep(0.1) time.sleep(0.1)
except OSError as err: except OSError as err:
err = str(err) err = str(err)

View File

@ -26,6 +26,7 @@ from valet.common.music import REST
from valet.engine.conf import init_engine from valet.engine.conf import init_engine
CONF = cfg.CONF CONF = cfg.CONF
LOG = get_logger(__name__)
class HealthCheck(object): class HealthCheck(object):
@ -107,7 +108,7 @@ class HealthCheck(object):
engine_id = placement['resources']['id'] engine_id = placement['resources']['id']
break break
except Exception as e: except Exception as e:
logger.warn("HealthCheck exception in read response " + str(e)) LOG.warning("HealthCheck exception in read response, ", str(e))
return engine_id return engine_id
@ -126,7 +127,7 @@ class HealthCheck(object):
} }
self.rest.request(method='delete', path=path, data=data) self.rest.request(method='delete', path=path, data=data)
except Exception as e: except Exception as e:
logger.warn("HealthCheck exception in delete request - " + str(e)) LOG.warning("HealthCheck exception in delete request, ", str(e))
try: try:
path = base % { path = base % {
@ -136,7 +137,7 @@ class HealthCheck(object):
} }
self.rest.request(method='delete', path=path, data=data) self.rest.request(method='delete', path=path, data=data)
except Exception as e: except Exception as e:
logger.warn("HealthCheck exception in delete response - " + str(e)) LOG.warning("HealthCheck exception in delete response, ", str(e))
if __name__ == "__main__": if __name__ == "__main__":
@ -144,20 +145,19 @@ if __name__ == "__main__":
respondent_id = None respondent_id = None
code = 0 code = 0
init_engine(default_config_files=['/etc/valet/valet.conf']) init_engine(default_config_files=['/etc/valet/valet.conf'])
logger = get_logger("ostro_daemon")
if os.path.exists(CONF.engine.pid): if os.path.exists(CONF.engine.pid):
respondent_id = HealthCheck().ping() respondent_id = HealthCheck().ping()
if respondent_id == CONF.engine.priority: if respondent_id == CONF.engine.priority:
code = CONF.engine.priority code = CONF.engine.priority
logger.info("HealthCheck - Alive, " LOG.info("HealthCheck - Alive, "
"respondent instance id: {}".format(respondent_id)) "respondent instance id: {}".format(respondent_id))
else: else:
logger.warn("HealthCheck - pid file exists, " LOG.warning("HealthCheck - pid file exists, "
"engine {} did not respond in a timely manner " "engine {} did not respond in a timely manner "
"(respondent id {})".format(CONF.engine.priority, "(respondent id {})".format(CONF.engine.priority,
respondent_id)) respondent_id))
else: else:
logger.info("HealthCheck - no pid file, engine is not running!") LOG.info("HealthCheck - no pid file, engine is not running!")
sys.exit(code) sys.exit(code)

View File

@ -16,6 +16,7 @@ import traceback
from novaclient import client as nova_client from novaclient import client as nova_client
from oslo_config import cfg from oslo_config import cfg
from oslo_log import log
from resource_base import Flavor from resource_base import Flavor
from resource_base import Host from resource_base import Host
@ -26,6 +27,7 @@ from resource_base import LogicalGroup
VERSION = 2 VERSION = 2
CONF = cfg.CONF CONF = cfg.CONF
LOG = log.getLogger(__name__)
class Compute(object): class Compute(object):
@ -37,9 +39,8 @@ class Compute(object):
Interacts with nova client to perform these actions. Interacts with nova client to perform these actions.
""" """
def __init__(self, _logger): def __init__(self):
"""Compute init.""" """Compute init."""
self.logger = _logger
self.nova = None self.nova = None
def set_hosts(self, _hosts, _logical_groups): def set_hosts(self, _hosts, _logical_groups):
@ -48,22 +49,22 @@ class Compute(object):
status = self._set_availability_zones(_hosts, _logical_groups) status = self._set_availability_zones(_hosts, _logical_groups)
if status != "success": if status != "success":
self.logger.error('_set_availability_zones failed') LOG.error('_set_availability_zones failed')
return status return status
status = self._set_aggregates(_hosts, _logical_groups) status = self._set_aggregates(_hosts, _logical_groups)
if status != "success": if status != "success":
self.logger.error('_set_aggregates failed') LOG.error('_set_aggregates failed')
return status return status
status = self._set_placed_vms(_hosts, _logical_groups) status = self._set_placed_vms(_hosts, _logical_groups)
if status != "success": if status != "success":
self.logger.error('_set_placed_vms failed') LOG.error('_set_placed_vms failed')
return status return status
status = self._set_resources(_hosts) status = self._set_resources(_hosts)
if status != "success": if status != "success":
self.logger.error('_set_resources failed') LOG.error('_set_resources failed')
return status return status
return "success" return "success"
@ -102,11 +103,11 @@ class Compute(object):
_hosts[host.name] = host _hosts[host.name] = host
except (ValueError, KeyError, TypeError): except (ValueError, KeyError, TypeError):
self.logger.error(traceback.format_exc()) LOG.error(traceback.format_exc())
return "Error while setting host zones from Nova" return "Error while setting host zones from Nova"
except Exception: except Exception:
self.logger.critical(traceback.format_exc()) LOG.critical(traceback.format_exc())
return "success" return "success"
@ -134,7 +135,7 @@ class Compute(object):
aggregate.vms_per_host[host.name] = [] aggregate.vms_per_host[host.name] = []
except (ValueError, KeyError, TypeError): except (ValueError, KeyError, TypeError):
self.logger.error(traceback.format_exc()) LOG.error(traceback.format_exc())
return "Error while setting host aggregates from Nova" return "Error while setting host aggregates from Nova"
return "success" return "success"
@ -182,7 +183,7 @@ class Compute(object):
_vm_list.append(s['uuid']) _vm_list.append(s['uuid'])
except (ValueError, KeyError, TypeError): except (ValueError, KeyError, TypeError):
self.logger.error(traceback.format_exc()) LOG.error(traceback.format_exc())
return "Error while getting existing vms" return "Error while getting existing vms"
return "success" return "success"
@ -201,7 +202,7 @@ class Compute(object):
_vm_detail.append(status) _vm_detail.append(status)
except (ValueError, KeyError, TypeError): except (ValueError, KeyError, TypeError):
self.logger.error(traceback.format_exc()) LOG.error(traceback.format_exc())
return "Error while getting vm detail" return "Error while getting vm detail"
return "success" return "success"
@ -226,7 +227,7 @@ class Compute(object):
host.disk_available_least = float(hv.disk_available_least) host.disk_available_least = float(hv.disk_available_least)
except (ValueError, KeyError, TypeError): except (ValueError, KeyError, TypeError):
self.logger.error(traceback.format_exc()) LOG.error(traceback.format_exc())
return "Error while setting host resources from Nova" return "Error while setting host resources from Nova"
return "success" return "success"
@ -287,7 +288,7 @@ class Compute(object):
_flavors[flavor.name] = flavor _flavors[flavor.name] = flavor
except (ValueError, KeyError, TypeError): except (ValueError, KeyError, TypeError):
self.logger.error(traceback.format_exc()) LOG.error(traceback.format_exc())
return "Error while getting flavors" return "Error while getting flavors"
return "success" return "success"
@ -308,7 +309,7 @@ class Compute(object):
break break
except (ValueError, KeyError, TypeError): except (ValueError, KeyError, TypeError):
self.logger.error(traceback.format_exc()) LOG.error(traceback.format_exc())
return "Error while getting flavor extra spec" return "Error while getting flavor extra spec"
return "success" return "success"

View File

@ -15,13 +15,17 @@
"""Compute Manager.""" """Compute Manager."""
from copy import deepcopy
import threading import threading
import time import time
from copy import deepcopy from oslo_log import log
from valet.engine.resource_manager.compute import Compute from valet.engine.resource_manager.compute import Compute
from valet.engine.resource_manager.resource_base import Host from valet.engine.resource_manager.resource_base import Host
LOG = log.getLogger(__name__)
class ComputeManager(threading.Thread): class ComputeManager(threading.Thread):
"""Compute Manager Class. """Compute Manager Class.
@ -30,7 +34,7 @@ class ComputeManager(threading.Thread):
flavors, etc. Calls many functions from Resource. flavors, etc. Calls many functions from Resource.
""" """
def __init__(self, _t_id, _t_name, _rsc, _data_lock, _config, _logger): def __init__(self, _t_id, _t_name, _rsc, _data_lock, _config):
"""Init Compute Manager.""" """Init Compute Manager."""
threading.Thread.__init__(self) threading.Thread.__init__(self)
@ -43,8 +47,6 @@ class ComputeManager(threading.Thread):
self.config = _config self.config = _config
self.logger = _logger
self.admin_token = None self.admin_token = None
self.project_token = None self.project_token = None
@ -52,7 +54,7 @@ class ComputeManager(threading.Thread):
def run(self): def run(self):
"""Start Compute Manager thread to run setup.""" """Start Compute Manager thread to run setup."""
self.logger.info("ComputeManager: start " + self.thread_name + LOG.info("ComputeManager: start " + self.thread_name +
" ......") " ......")
if self.config.compute_trigger_freq > 0: if self.config.compute_trigger_freq > 0:
@ -71,20 +73,20 @@ class ComputeManager(threading.Thread):
self.config.compute_trigger_freq) self.config.compute_trigger_freq)
# NOTE(GJ): do not timer based batch # NOTE(GJ): do not timer based batch
self.logger.info("exit compute_manager " + self.thread_name) LOG.info("exit compute_manager " + self.thread_name)
def _run(self): def _run(self):
self.logger.info("ComputeManager: --- start compute_nodes " LOG.info("ComputeManager: --- start compute_nodes "
"status update ---") "status update ---")
triggered_host_updates = self.set_hosts() triggered_host_updates = self.set_hosts()
if triggered_host_updates is not True: if triggered_host_updates is not True:
self.logger.warn("fail to set hosts from nova") LOG.warning("fail to set hosts from nova")
triggered_flavor_updates = self.set_flavors() triggered_flavor_updates = self.set_flavors()
if triggered_flavor_updates is not True: if triggered_flavor_updates is not True:
self.logger.warn("fail to set flavor from nova") LOG.warning("fail to set flavor from nova")
self.logger.info("ComputeManager: --- done compute_nodes " LOG.info("ComputeManager: --- done compute_nodes "
"status update ---") "status update ---")
return True return True
@ -94,7 +96,7 @@ class ComputeManager(threading.Thread):
hosts = {} hosts = {}
logical_groups = {} logical_groups = {}
compute = Compute(self.logger) compute = Compute()
status = compute.set_hosts(hosts, logical_groups) status = compute.set_hosts(hosts, logical_groups)
if status != "success": if status != "success":
@ -125,7 +127,7 @@ class ComputeManager(threading.Thread):
_logical_groups[lk]) _logical_groups[lk])
self.resource.logical_groups[lk].last_update = time.time() self.resource.logical_groups[lk].last_update = time.time()
self.logger.warn("ComputeManager: new logical group (" + LOG.warning("ComputeManager: new logical group (" +
lk + ") added") lk + ") added")
updated = True updated = True
@ -137,7 +139,7 @@ class ComputeManager(threading.Thread):
self.resource.logical_groups[rlk].status = "disabled" self.resource.logical_groups[rlk].status = "disabled"
self.resource.logical_groups[rlk].last_update = time.time() self.resource.logical_groups[rlk].last_update = time.time()
self.logger.warn("ComputeManager: logical group (" + LOG.warning("ComputeManager: logical group (" +
rlk + ") removed") rlk + ") removed")
updated = True updated = True
@ -149,7 +151,7 @@ class ComputeManager(threading.Thread):
if self._check_logical_group_metadata_update(lg, rlg) is True: if self._check_logical_group_metadata_update(lg, rlg) is True:
rlg.last_update = time.time() rlg.last_update = time.time()
self.logger.warn("ComputeManager: logical group (" + LOG.warning("ComputeManager: logical group (" +
lk + ") updated") lk + ") updated")
updated = True updated = True
@ -193,7 +195,7 @@ class ComputeManager(threading.Thread):
self.resource.hosts[new_host.name] = new_host self.resource.hosts[new_host.name] = new_host
new_host.last_update = time.time() new_host.last_update = time.time()
self.logger.warn("ComputeManager: new host (" + LOG.warning("ComputeManager: new host (" +
new_host.name + ") added") new_host.name + ") added")
updated = True updated = True
@ -203,7 +205,7 @@ class ComputeManager(threading.Thread):
rhost.tag.remove("nova") rhost.tag.remove("nova")
rhost.last_update = time.time() rhost.last_update = time.time()
self.logger.warn("ComputeManager: host (" + LOG.warning("ComputeManager: host (" +
rhost.name + ") disabled") rhost.name + ") disabled")
updated = True updated = True
@ -217,7 +219,7 @@ class ComputeManager(threading.Thread):
for hk, h in self.resource.hosts.iteritems(): for hk, h in self.resource.hosts.iteritems():
if h.clean_memberships() is True: if h.clean_memberships() is True:
h.last_update = time.time() h.last_update = time.time()
self.logger.warn("ComputeManager: host (" + h.name + LOG.warning("ComputeManager: host (" + h.name +
") updated (delete EX/AFF/DIV membership)") ") updated (delete EX/AFF/DIV membership)")
updated = True updated = True
@ -247,19 +249,19 @@ class ComputeManager(threading.Thread):
if "nova" not in _rhost.tag: if "nova" not in _rhost.tag:
_rhost.tag.append("nova") _rhost.tag.append("nova")
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (tag added)") ") updated (tag added)")
if _host.status != _rhost.status: if _host.status != _rhost.status:
_rhost.status = _host.status _rhost.status = _host.status
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (status changed)") ") updated (status changed)")
if _host.state != _rhost.state: if _host.state != _rhost.state:
_rhost.state = _host.state _rhost.state = _host.state
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (state changed)") ") updated (state changed)")
return topology_updated return topology_updated
@ -274,7 +276,7 @@ class ComputeManager(threading.Thread):
_rhost.original_vCPUs = _host.original_vCPUs _rhost.original_vCPUs = _host.original_vCPUs
_rhost.avail_vCPUs = _host.avail_vCPUs _rhost.avail_vCPUs = _host.avail_vCPUs
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (CPU updated)") ") updated (CPU updated)")
if _host.mem_cap != _rhost.mem_cap or \ if _host.mem_cap != _rhost.mem_cap or \
@ -284,7 +286,7 @@ class ComputeManager(threading.Thread):
_rhost.original_mem_cap = _host.original_mem_cap _rhost.original_mem_cap = _host.original_mem_cap
_rhost.avail_mem_cap = _host.avail_mem_cap _rhost.avail_mem_cap = _host.avail_mem_cap
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (mem updated)") ") updated (mem updated)")
if _host.local_disk_cap != _rhost.local_disk_cap or \ if _host.local_disk_cap != _rhost.local_disk_cap or \
@ -294,7 +296,7 @@ class ComputeManager(threading.Thread):
_rhost.original_local_disk_cap = _host.original_local_disk_cap _rhost.original_local_disk_cap = _host.original_local_disk_cap
_rhost.avail_local_disk_cap = _host.avail_local_disk_cap _rhost.avail_local_disk_cap = _host.avail_local_disk_cap
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (local disk space updated)") ") updated (local disk space updated)")
if _host.vCPUs_used != _rhost.vCPUs_used or \ if _host.vCPUs_used != _rhost.vCPUs_used or \
@ -306,7 +308,7 @@ class ComputeManager(threading.Thread):
_rhost.free_disk_gb = _host.free_disk_gb _rhost.free_disk_gb = _host.free_disk_gb
_rhost.disk_available_least = _host.disk_available_least _rhost.disk_available_least = _host.disk_available_least
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (other resource numbers)") ") updated (other resource numbers)")
return topology_updated return topology_updated
@ -318,7 +320,7 @@ class ComputeManager(threading.Thread):
if mk not in _rhost.memberships.keys(): if mk not in _rhost.memberships.keys():
_rhost.memberships[mk] = self.resource.logical_groups[mk] _rhost.memberships[mk] = self.resource.logical_groups[mk]
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (new membership)") ") updated (new membership)")
for mk in _rhost.memberships.keys(): for mk in _rhost.memberships.keys():
@ -328,7 +330,7 @@ class ComputeManager(threading.Thread):
if mk not in _host.memberships.keys(): if mk not in _host.memberships.keys():
del _rhost.memberships[mk] del _rhost.memberships[mk]
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (delete membership)") ") updated (delete membership)")
return topology_updated return topology_updated
@ -343,7 +345,7 @@ class ComputeManager(threading.Thread):
if alen != blen: if alen != blen:
topology_updated = True topology_updated = True
msg = "host ({0}) {1} none vms removed" msg = "host ({0}) {1} none vms removed"
self.logger.warn(msg.format(_rhost.name, str(blen - alen))) LOG.warning(msg.format(_rhost.name, str(blen - alen)))
self.resource.clean_none_vms_from_logical_groups(_rhost) self.resource.clean_none_vms_from_logical_groups(_rhost)
@ -351,7 +353,7 @@ class ComputeManager(threading.Thread):
if _rhost.exist_vm_by_uuid(vm_id[2]) is False: if _rhost.exist_vm_by_uuid(vm_id[2]) is False:
_rhost.vm_list.append(vm_id) _rhost.vm_list.append(vm_id)
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (new vm placed)") ") updated (new vm placed)")
for rvm_id in _rhost.vm_list: for rvm_id in _rhost.vm_list:
@ -359,7 +361,7 @@ class ComputeManager(threading.Thread):
self.resource.remove_vm_by_uuid_from_logical_groups( self.resource.remove_vm_by_uuid_from_logical_groups(
_rhost, rvm_id[2]) _rhost, rvm_id[2])
topology_updated = True topology_updated = True
self.logger.warn("ComputeManager: host (" + _rhost.name + LOG.warning("ComputeManager: host (" + _rhost.name +
") updated (vm removed)") ") updated (vm removed)")
blen = len(_rhost.vm_list) blen = len(_rhost.vm_list)
@ -369,7 +371,7 @@ class ComputeManager(threading.Thread):
if alen != blen: if alen != blen:
topology_updated = True topology_updated = True
msg = "host ({0}) {1} vms removed" msg = "host ({0}) {1} vms removed"
self.logger.warn(msg.format(_rhost.name, str(blen - alen))) LOG.warning(msg.format(_rhost.name, str(blen - alen)))
return topology_updated return topology_updated
@ -377,11 +379,11 @@ class ComputeManager(threading.Thread):
"""Return True if compute set flavors returns success.""" """Return True if compute set flavors returns success."""
flavors = {} flavors = {}
compute = Compute(self.logger) compute = Compute()
status = compute.set_flavors(flavors) status = compute.set_flavors(flavors)
if status != "success": if status != "success":
self.logger.error(status) LOG.error(status)
return False return False
self.data_lock.acquire() self.data_lock.acquire()
@ -399,7 +401,7 @@ class ComputeManager(threading.Thread):
self.resource.flavors[fk] = deepcopy(_flavors[fk]) self.resource.flavors[fk] = deepcopy(_flavors[fk])
self.resource.flavors[fk].last_update = time.time() self.resource.flavors[fk].last_update = time.time()
self.logger.warn("ComputeManager: new flavor (" + LOG.warning("ComputeManager: new flavor (" +
fk + ":" + _flavors[fk].flavor_id + ") added") fk + ":" + _flavors[fk].flavor_id + ") added")
updated = True updated = True
@ -409,7 +411,7 @@ class ComputeManager(threading.Thread):
rf.status = "disabled" rf.status = "disabled"
rf.last_update = time.time() rf.last_update = time.time()
self.logger.warn("ComputeManager: flavor (" + rfk + ":" + LOG.warning("ComputeManager: flavor (" + rfk + ":" +
rf.flavor_id + ") removed") rf.flavor_id + ") removed")
updated = True updated = True
@ -419,7 +421,7 @@ class ComputeManager(threading.Thread):
if self._check_flavor_spec_update(f, rf) is True: if self._check_flavor_spec_update(f, rf) is True:
rf.last_update = time.time() rf.last_update = time.time()
self.logger.warn("ComputeManager: flavor (" + fk + ":" + LOG.warning("ComputeManager: flavor (" + fk + ":" +
rf.flavor_id + ") spec updated") rf.flavor_id + ") spec updated")
updated = True updated = True

View File

@ -15,6 +15,8 @@
import time import time
import traceback import traceback
from oslo_log import log
from valet.engine.optimizer.app_manager.app_topology_base import LEVELS from valet.engine.optimizer.app_manager.app_topology_base import LEVELS
from valet.engine.resource_manager.resource_base import Datacenter from valet.engine.resource_manager.resource_base import Datacenter
from valet.engine.resource_manager.resource_base import Flavor from valet.engine.resource_manager.resource_base import Flavor
@ -22,6 +24,8 @@ from valet.engine.resource_manager.resource_base import Host
from valet.engine.resource_manager.resource_base import HostGroup from valet.engine.resource_manager.resource_base import HostGroup
from valet.engine.resource_manager.resource_base import LogicalGroup from valet.engine.resource_manager.resource_base import LogicalGroup
LOG = log.getLogger(__name__)
class Resource(object): class Resource(object):
"""Resource Class. """Resource Class.
@ -32,12 +36,11 @@ class Resource(object):
updates to base resource types. updates to base resource types.
""" """
def __init__(self, _db, _config, _logger): def __init__(self, _db, _config):
"""Init Resource Class.""" """Init Resource Class."""
self.db = _db self.db = _db
self.config = _config self.config = _config
self.logger = _logger
""" resource data """ """ resource data """
self.datacenter = Datacenter(self.config.datacenter_name) self.datacenter = Datacenter(self.config.datacenter_name)
@ -64,7 +67,7 @@ class Resource(object):
def bootstrap_from_db(self, _resource_status): def bootstrap_from_db(self, _resource_status):
"""Return True if bootsrap resource from database successful.""" """Return True if bootsrap resource from database successful."""
try: try:
self.logger.info("Resource status from DB = %s", _resource_status) LOG.info("Resource status from DB = %s", _resource_status)
logical_groups = _resource_status.get("logical_groups") logical_groups = _resource_status.get("logical_groups")
if logical_groups: if logical_groups:
for lgk, lg in logical_groups.iteritems(): for lgk, lg in logical_groups.iteritems():
@ -78,7 +81,7 @@ class Resource(object):
self.logical_groups[lgk] = logical_group self.logical_groups[lgk] = logical_group
if len(self.logical_groups) == 0: if len(self.logical_groups) == 0:
self.logger.warn("no logical_groups") LOG.warning("no logical_groups")
flavors = _resource_status.get("flavors") flavors = _resource_status.get("flavors")
if flavors: if flavors:
@ -94,7 +97,7 @@ class Resource(object):
self.flavors[fk] = flavor self.flavors[fk] = flavor
if len(self.flavors) == 0: if len(self.flavors) == 0:
self.logger.error("fail loading flavors") LOG.error("fail loading flavors")
hosts = _resource_status.get("hosts") hosts = _resource_status.get("hosts")
if hosts: if hosts:
@ -124,7 +127,7 @@ class Resource(object):
self.hosts[hk] = host self.hosts[hk] = host
if len(self.hosts) == 0: if len(self.hosts) == 0:
self.logger.error("fail loading hosts") LOG.error("fail loading hosts")
host_groups = _resource_status.get("host_groups") host_groups = _resource_status.get("host_groups")
if host_groups: if host_groups:
@ -151,7 +154,7 @@ class Resource(object):
self.host_groups[hgk] = host_group self.host_groups[hgk] = host_group
if len(self.host_groups) == 0: if len(self.host_groups) == 0:
self.logger.warn("fail loading host_groups") LOG.warning("fail loading host_groups")
dc = _resource_status.get("datacenter") dc = _resource_status.get("datacenter")
if dc: if dc:
@ -181,7 +184,7 @@ class Resource(object):
self.datacenter.resources[ck] = self.hosts[ck] self.datacenter.resources[ck] = self.hosts[ck]
if len(self.datacenter.resources) == 0: if len(self.datacenter.resources) == 0:
self.logger.error("fail loading datacenter") LOG.error("fail loading datacenter")
hgs = _resource_status.get("host_groups") hgs = _resource_status.get("host_groups")
if hgs: if hgs:
@ -215,7 +218,7 @@ class Resource(object):
self._update_compute_avail() self._update_compute_avail()
except Exception: except Exception:
self.logger.error("while bootstrap_from_db: ", LOG.error("while bootstrap_from_db: ",
traceback.format_exc()) traceback.format_exc())
return True return True
@ -314,7 +317,7 @@ class Resource(object):
host_group_updates = {} host_group_updates = {}
datacenter_update = None datacenter_update = None
self.logger.info("check and store resource status") LOG.info("check and store resource status")
for fk, flavor in self.flavors.iteritems(): for fk, flavor in self.flavors.iteritems():
if flavor.last_update >= self.curr_db_timestamp: if flavor.last_update >= self.curr_db_timestamp:
@ -366,22 +369,22 @@ class Resource(object):
return True return True
def show_current_logical_groups(self): def show_current_logical_groups(self):
self.logger.debug("--- track logical groups info ---") LOG.debug("--- track logical groups info ---")
for lgk, lg in self.logical_groups.iteritems(): for lgk, lg in self.logical_groups.iteritems():
if lg.status == "enabled": if lg.status == "enabled":
self.logger.debug("lg name = " + lgk) LOG.debug("lg name = " + lgk)
self.logger.debug(" type = " + lg.group_type) LOG.debug(" type = " + lg.group_type)
if lg.group_type == "AGGR": if lg.group_type == "AGGR":
for k in lg.metadata.keys(): for k in lg.metadata.keys():
self.logger.debug(" metadata key = " + k) LOG.debug(" metadata key = " + k)
self.logger.debug(" vms") LOG.debug(" vms")
debug_msg = " orch_id = %s uuid = %s" debug_msg = " orch_id = %s uuid = %s"
for v in lg.vm_list: for v in lg.vm_list:
self.logger.debug(debug_msg, v[0], v[2]) LOG.debug(debug_msg % (v[0], v[2]))
self.logger.debug(" hosts") LOG.debug(" hosts")
for h, v in lg.vms_per_host.iteritems(): for h, v in lg.vms_per_host.iteritems():
self.logger.debug(" host = %s", h) LOG.debug(" host = %s" % h)
self.logger.debug(" vms = %s", LOG.debug(" vms = %s" %
str(len(lg.vms_per_host[h]))) str(len(lg.vms_per_host[h])))
host = None host = None
if h in self.hosts.keys(): if h in self.hosts.keys():
@ -389,43 +392,43 @@ class Resource(object):
elif h in self.host_groups.keys(): elif h in self.host_groups.keys():
host = self.host_groups[h] host = self.host_groups[h]
else: else:
self.logger.error("TEST: lg member not exist") LOG.error("TEST: lg member not exist")
if host is not None: if host is not None:
self.logger.debug(" status = " + host.status) LOG.debug(" status = " + host.status)
if lgk not in host.memberships.keys(): if lgk not in host.memberships.keys():
self.logger.error("TEST: membership missing") LOG.error("TEST: membership missing")
def show_current_host_status(self): def show_current_host_status(self):
self.logger.debug("--- track host info ---") LOG.debug("--- track host info ---")
for hk, h in self.hosts.iteritems(): for hk, h in self.hosts.iteritems():
self.logger.debug("host name = " + hk) LOG.debug("host name = " + hk)
self.logger.debug(" status = " + h.status + ", " + h.state) LOG.debug(" status = " + h.status + ", " + h.state)
self.logger.debug(" vms = " + str(len(h.vm_list))) LOG.debug(" vms = " + str(len(h.vm_list)))
self.logger.debug(" resources (org, total, avail, used)") LOG.debug(" resources (org, total, avail, used)")
cpu_org = str(h.original_vCPUs) cpu_org = str(h.original_vCPUs)
cpu_tot = str(h.vCPUs) cpu_tot = str(h.vCPUs)
cpu_avail = str(h.avail_vCPUs) cpu_avail = str(h.avail_vCPUs)
cpu_used = str(h.vCPUs_used) cpu_used = str(h.vCPUs_used)
msg = " {0} = {1}, {2}, {3}, {4}" msg = " {0} = {1}, {2}, {3}, {4}"
self.logger.debug( LOG.debug(
msg.format('cpu', cpu_org, cpu_tot, cpu_avail, cpu_used)) msg.format('cpu', cpu_org, cpu_tot, cpu_avail, cpu_used))
mem_org = str(h.original_mem_cap) mem_org = str(h.original_mem_cap)
mem_tot = str(h.mem_cap) mem_tot = str(h.mem_cap)
mem_avail = str(h.avail_mem_cap) mem_avail = str(h.avail_mem_cap)
mem_used = str(h.free_mem_mb) mem_used = str(h.free_mem_mb)
self.logger.debug( LOG.debug(
msg.format('mem', mem_org, mem_tot, mem_avail, mem_used)) msg.format('mem', mem_org, mem_tot, mem_avail, mem_used))
dsk_org = str(h.original_local_disk_cap) dsk_org = str(h.original_local_disk_cap)
dsk_tot = str(h.local_disk_cap) dsk_tot = str(h.local_disk_cap)
dsk_avail = str(h.avail_local_disk_cap) dsk_avail = str(h.avail_local_disk_cap)
dsk_used = str(h.free_disk_gb) dsk_used = str(h.free_disk_gb)
self.logger.debug( LOG.debug(
msg.format('disk', dsk_org, dsk_tot, dsk_avail, dsk_used)) msg.format('disk', dsk_org, dsk_tot, dsk_avail, dsk_used))
self.logger.debug(" memberships") LOG.debug(" memberships")
for mk in h.memberships.keys(): for mk in h.memberships.keys():
self.logger.debug(" " + mk) LOG.debug(" " + mk)
if mk not in self.logical_groups.keys(): if mk not in self.logical_groups.keys():
self.logger.error("TEST: lg missing") LOG.error("TEST: lg missing")
def update_rack_resource(self, _host): def update_rack_resource(self, _host):
"""Update resources for rack (host), then update cluster.""" """Update resources for rack (host), then update cluster."""
@ -509,8 +512,9 @@ class Resource(object):
if host.status != _st: if host.status != _st:
host.status = _st host.status = _st
self.logger.info( LOG.warning(
"Resource.update_host_resources: host(%s) status changed", _hn) "Resource.update_host_resources: host(%s) status changed" %
_hn)
updated = True updated = True
# FIXME(GJ): should check cpu, memm and disk here? # FIXME(GJ): should check cpu, memm and disk here?
@ -577,8 +581,8 @@ class Resource(object):
"""Remove vm by orchestration id from lgs. Update host and lgs.""" """Remove vm by orchestration id from lgs. Update host and lgs."""
for lgk in _host.memberships.keys(): for lgk in _host.memberships.keys():
if lgk not in self.logical_groups.keys(): if lgk not in self.logical_groups.keys():
self.logger.warn("logical group (%s) missing while " LOG.warning("logical group (%s) missing while "
"removing %s", lgk, _h_uuid) "removing %s" % (lgk, _h_uuid))
continue continue
lg = self.logical_groups[lgk] lg = self.logical_groups[lgk]
@ -617,8 +621,8 @@ class Resource(object):
"""Remove vm by uuid from lgs and update proper host and lgs.""" """Remove vm by uuid from lgs and update proper host and lgs."""
for lgk in _host.memberships.keys(): for lgk in _host.memberships.keys():
if lgk not in self.logical_groups.keys(): if lgk not in self.logical_groups.keys():
self.logger.warn("logical group (%s) missing while " LOG.warning("logical group (%s) missing while "
"removing %s", lgk, _uuid) "removing %s" % (lgk, _uuid))
continue continue
lg = self.logical_groups[lgk] lg = self.logical_groups[lgk]

View File

@ -16,10 +16,14 @@
"""Topology class - performs actual setting up of Topology object.""" """Topology class - performs actual setting up of Topology object."""
import copy import copy
from oslo_log import log
from sre_parse import isdigit from sre_parse import isdigit
from valet.engine.resource_manager.resource_base import HostGroup from valet.engine.resource_manager.resource_base import HostGroup
LOG = log.getLogger(__name__)
class Topology(object): class Topology(object):
"""Topology class. """Topology class.
@ -27,10 +31,9 @@ class Topology(object):
currently, using canonical naming convention to find the topology currently, using canonical naming convention to find the topology
""" """
def __init__(self, _config, _logger): def __init__(self, _config):
"""Init config and logger.""" """Init config and logger."""
self.config = _config self.config = _config
self.logger = _logger
# Triggered by rhosts change # Triggered by rhosts change
def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts): def set_topology(self, _datacenter, _host_groups, _hosts, _rhosts):
@ -52,7 +55,7 @@ class Topology(object):
(region_name, rack_name, _, status) = self._set_layout_by_name(rhk) (region_name, rack_name, _, status) = self._set_layout_by_name(rhk)
if status != "success": if status != "success":
self.logger.warn(status + " in host_name (" + rhk + ")") LOG.warning(status + " in host_name (" + rhk + ")")
if region_name not in _datacenter.region_code_list: if region_name not in _datacenter.region_code_list:
_datacenter.region_code_list.append(region_name) _datacenter.region_code_list.append(region_name)
@ -76,10 +79,10 @@ class Topology(object):
_datacenter.resources[hgk] = hg _datacenter.resources[hgk] = hg
if len(_datacenter.region_code_list) > 1: if len(_datacenter.region_code_list) > 1:
self.logger.warn("more than one region code") LOG.warning("more than one region code")
if "none" in _host_groups.keys(): if "none" in _host_groups.keys():
self.logger.warn("some hosts are into unknown rack") LOG.warning("some hosts are into unknown rack")
return "success" return "success"

View File

@ -15,17 +15,21 @@
import threading import threading
import time import time
from oslo_log import log
from valet.engine.resource_manager.resource_base import Datacenter from valet.engine.resource_manager.resource_base import Datacenter
from valet.engine.resource_manager.resource_base import Host from valet.engine.resource_manager.resource_base import Host
from valet.engine.resource_manager.resource_base import HostGroup from valet.engine.resource_manager.resource_base import HostGroup
from valet.engine.resource_manager.topology import Topology from valet.engine.resource_manager.topology import Topology
LOG = log.getLogger(__name__)
class TopologyManager(threading.Thread): class TopologyManager(threading.Thread):
"""Topology Manager Class.""" """Topology Manager Class."""
def __init__(self, _t_id, _t_name, _resource, def __init__(self, _t_id, _t_name, _resource,
_data_lock, _config, _logger): _data_lock, _config):
"""Init Topology Manager.""" """Init Topology Manager."""
threading.Thread.__init__(self) threading.Thread.__init__(self)
@ -37,13 +41,12 @@ class TopologyManager(threading.Thread):
self.resource = _resource self.resource = _resource
self.config = _config self.config = _config
self.logger = _logger
self.update_batch_wait = self.config.update_batch_wait self.update_batch_wait = self.config.update_batch_wait
def run(self): def run(self):
"""Function starts and tracks Topology Manager Thread.""" """Function starts and tracks Topology Manager Thread."""
self.logger.info("TopologyManager: start " + LOG.info("TopologyManager: start " +
self.thread_name + " ......") self.thread_name + " ......")
if self.config.topology_trigger_freq > 0: if self.config.topology_trigger_freq > 0:
@ -61,17 +64,17 @@ class TopologyManager(threading.Thread):
period_end = (curr_ts + period_end = (curr_ts +
self.config.topology_trigger_freq) self.config.topology_trigger_freq)
# NOTE(GJ): do not timer based batch # NOTE(GJ): do not timer based batch
self.logger.info("exit topology_manager " + self.thread_name) LOG.info("exit topology_manager " + self.thread_name)
def _run(self): def _run(self):
self.logger.info("TopologyManager: --- start topology " LOG.info("TopologyManager: --- start topology "
"status update ---") "status update ---")
if self.set_topology() is not True: if self.set_topology() is not True:
self.logger.warn("fail to set topology") LOG.warning("fail to set topology")
self.logger.info("--- done topology status update ---") LOG.info("--- done topology status update ---")
def set_topology(self): def set_topology(self):
host_groups = {} host_groups = {}
@ -80,7 +83,7 @@ class TopologyManager(threading.Thread):
# NOTE(GJ): do not consider switch topology at this version # NOTE(GJ): do not consider switch topology at this version
datacenter = Datacenter(self.config.datacenter_name) datacenter = Datacenter(self.config.datacenter_name)
topology = Topology(self.config, self.logger) topology = Topology(self.config)
status = topology.set_topology(datacenter, host_groups, hosts, status = topology.set_topology(datacenter, host_groups, hosts,
self.resource.hosts) self.resource.hosts)
@ -104,7 +107,7 @@ class TopologyManager(threading.Thread):
new_host.last_update = time.time() new_host.last_update = time.time()
self.logger.info("TopologyManager: new host (" + LOG.warning("TopologyManager: new host (" +
new_host.name + ") added from configuration") new_host.name + ") added from configuration")
updated = True updated = True
@ -116,7 +119,7 @@ class TopologyManager(threading.Thread):
host.last_update = time.time() host.last_update = time.time()
self.logger.info("TopologyManager: host (" + LOG.warning("TopologyManager: host (" +
host.name + ") removed from configuration") host.name + ") removed from configuration")
updated = True updated = True
@ -127,7 +130,7 @@ class TopologyManager(threading.Thread):
new_host_group.last_update = time.time() new_host_group.last_update = time.time()
self.logger.info("TopologyManager: new host_group (" + LOG.warning("TopologyManager: new host_group (" +
new_host_group.name + ") added") new_host_group.name + ") added")
updated = True updated = True
@ -138,7 +141,7 @@ class TopologyManager(threading.Thread):
host_group.last_update = time.time() host_group.last_update = time.time()
self.logger.info("TopologyManager: host_group (" + LOG.warning("TopologyManager: host_group (" +
host_group.name + ") disabled") host_group.name + ") disabled")
updated = True updated = True
@ -191,7 +194,7 @@ class TopologyManager(threading.Thread):
if "infra" not in _rhost.tag: if "infra" not in _rhost.tag:
_rhost.tag.append("infra") _rhost.tag.append("infra")
updated = True updated = True
self.logger.info("TopologyManager: host (" + _rhost.name + LOG.warning("TopologyManager: host (" + _rhost.name +
") updated (tag)") ") updated (tag)")
if (_rhost.host_group is None or if (_rhost.host_group is None or
@ -203,7 +206,7 @@ class TopologyManager(threading.Thread):
else: else:
_rhost.host_group = self.resource.datacenter _rhost.host_group = self.resource.datacenter
updated = True updated = True
self.logger.info("TopologyManager: host (" + _rhost.name + LOG.warning("TopologyManager: host (" + _rhost.name +
") updated (host_group)") ") updated (host_group)")
return updated return updated
@ -214,13 +217,13 @@ class TopologyManager(threading.Thread):
if _hg.host_type != _rhg.host_type: if _hg.host_type != _rhg.host_type:
_rhg.host_type = _hg.host_type _rhg.host_type = _hg.host_type
updated = True updated = True
self.logger.info("TopologyManager: host_group (" + _rhg.name + LOG.warning("TopologyManager: host_group (" + _rhg.name +
") updated (hosting type)") ") updated (hosting type)")
if _rhg.status == "disabled": if _rhg.status == "disabled":
_rhg.status = "enabled" _rhg.status = "enabled"
updated = True updated = True
self.logger.info("TopologyManager: host_group (" + _rhg.name + LOG.warning("TopologyManager: host_group (" + _rhg.name +
") updated (enabled)") ") updated (enabled)")
if _hg.parent_resource != _rhg.parent_resource: if _hg.parent_resource != _rhg.parent_resource:
@ -230,7 +233,7 @@ class TopologyManager(threading.Thread):
else: else:
_rhg.parent_resource = self.resource.datacenter _rhg.parent_resource = self.resource.datacenter
updated = True updated = True
self.logger.info("TopologyManager: host_group (" + _rhg.name + LOG.warning("TopologyManager: host_group (" + _rhg.name +
") updated (parent host_group)") ") updated (parent host_group)")
for rk in _hg.child_resources.keys(): for rk in _hg.child_resources.keys():
@ -245,7 +248,7 @@ class TopologyManager(threading.Thread):
elif _rhg.host_type == "cluster": elif _rhg.host_type == "cluster":
_rhg.child_resources[rk] = self.resource.host_groups[rk] _rhg.child_resources[rk] = self.resource.host_groups[rk]
updated = True updated = True
self.logger.info("TopologyManager: host_group (" + _rhg.name + LOG.warning("TopologyManager: host_group (" + _rhg.name +
") updated (new child host)") ") updated (new child host)")
for rrk in _rhg.child_resources.keys(): for rrk in _rhg.child_resources.keys():
@ -257,7 +260,7 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
del _rhg.child_resources[rrk] del _rhg.child_resources[rrk]
updated = True updated = True
self.logger.info("TopologyManager: host_group (" + _rhg.name + LOG.warning("TopologyManager: host_group (" + _rhg.name +
") updated (child host removed)") ") updated (child host removed)")
return updated return updated
@ -269,7 +272,7 @@ class TopologyManager(threading.Thread):
if rc not in self.resource.datacenter.region_code_list: if rc not in self.resource.datacenter.region_code_list:
self.resource.datacenter.region_code_list.append(rc) self.resource.datacenter.region_code_list.append(rc)
updated = True updated = True
self.logger.info("TopologyManager: datacenter updated " LOG.warning("TopologyManager: datacenter updated "
"(new region code, " + rc + ")") "(new region code, " + rc + ")")
code_list = self.resource.datacenter.region_code_list code_list = self.resource.datacenter.region_code_list
@ -279,7 +282,7 @@ class TopologyManager(threading.Thread):
if alen != blen: if alen != blen:
updated = True updated = True
self.resource.datacenter.region_code_list = code_list self.resource.datacenter.region_code_list = code_list
self.logger.info("datacenter updated (region code removed)") LOG.warning("datacenter updated (region code removed)")
for rk in _datacenter.resources.keys(): for rk in _datacenter.resources.keys():
exist = False exist = False
@ -296,7 +299,7 @@ class TopologyManager(threading.Thread):
self.resource.datacenter.resources[rk] = \ self.resource.datacenter.resources[rk] = \
self.resource.hosts[rk] self.resource.hosts[rk]
updated = True updated = True
self.logger.info("TopologyManager: datacenter updated " LOG.warning("TopologyManager: datacenter updated "
"(new resource)") "(new resource)")
for rrk in self.resource.datacenter.resources.keys(): for rrk in self.resource.datacenter.resources.keys():
@ -308,7 +311,7 @@ class TopologyManager(threading.Thread):
if exist is False: if exist is False:
del self.resource.datacenter.resources[rrk] del self.resource.datacenter.resources[rrk]
updated = True updated = True
self.logger.info("TopologyManager: datacenter updated " LOG.warning("TopologyManager: datacenter updated "
"(resource removed)") "(resource removed)")
return updated return updated

View File

@ -12,16 +12,11 @@
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and # See the License for the specific language governing permissions and
# limitations under the License. # limitations under the License.
"""Test Search."""
import logging
import mock import mock
from valet.engine.optimizer.ostro.search import Search from valet.engine.optimizer.ostro.search import Search
from valet.tests.base import Base from valet.tests.base import Base
LOG = logging.getLogger(__name__)
class TestSearch(Base): class TestSearch(Base):
"""Unit tests for valet.engine.optimizer.ostro.search.""" """Unit tests for valet.engine.optimizer.ostro.search."""
@ -30,7 +25,7 @@ class TestSearch(Base):
"""Setup Test Search Class.""" """Setup Test Search Class."""
super(TestSearch, self).setUp() super(TestSearch, self).setUp()
self.search = Search(LOG) self.search = Search()
def test_copy_resource_status(self): def test_copy_resource_status(self):
"""Test Copy Resource Status.""" """Test Copy Resource Status."""

View File

@ -25,7 +25,7 @@ class TestTopology(Base):
def setUp(self): def setUp(self):
"""Setup TestTopology Test Class.""" """Setup TestTopology Test Class."""
super(TestTopology, self).setUp() super(TestTopology, self).setUp()
self.topo = Topology(Config(), None) self.topo = Topology(Config())
def test_simple_topology(self): def test_simple_topology(self):
"""Validate simple topology (region, rack, node_type and status).""" """Validate simple topology (region, rack, node_type and status)."""