Remove code unrelated to alarming

This commit removes everything that is not related to alarming.

Change-Id: Iec63d8ad00e253e962d8525080125ae4e1cc468e
Julien Danjou 2015-05-25 07:26:37 +02:00
parent d9b04cd19c
commit 9ba3ef0ee6
457 changed files with 84 additions and 62331 deletions


@ -47,21 +47,6 @@ M: Doug Hellmann (dhellmann) <doug.hellmann@dreamhost.com>
S: Maintained
F: api/
== events ==
M: Julien Danjou (jd__)
M: Sandy Walsh (sandywalsh)
M: Monsyne Dragon (dragondm)
S: Maintained
F: notification.py
F: storage/
== pipeline ==
M: Julien Danjou (jd__)
S: Maintained
F: publisher/, transformer/, pipeline.py
== storage ==
-- DB2 --


@ -1,358 +0,0 @@
#
# Copyright 2013 Julien Danjou
# Copyright 2014 Red Hat, Inc
#
# Authors: Julien Danjou <julien@danjou.info>
# Eoghan Glynn <eglynn@redhat.com>
# Nejc Saje <nsaje@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import fnmatch
import itertools
import random
from oslo_config import cfg
from oslo_context import context
from oslo_log import log
from oslo_service import service as os_service
import six
from six import moves
from six.moves.urllib import parse as urlparse
from stevedore import extension
from ceilometer.agent import plugin_base
from ceilometer import coordination
from ceilometer.i18n import _
from ceilometer import pipeline as publish_pipeline
from ceilometer import utils
LOG = log.getLogger(__name__)
OPTS = [
cfg.IntOpt('shuffle_time_before_polling_task',
default=0,
help='To reduce the load of simultaneous requests to Nova or '
'other components from different compute agents, shuffle '
'the start time of the polling task.'),
]
cfg.CONF.register_opts(OPTS)
class PollsterListForbidden(Exception):
def __init__(self):
msg = ('The pollster-list option of the polling agent cannot be '
'used when coordination between multiple agents is enabled. '
'Use either multiple coordinated agents, or the pollster-list '
'option with a single polling agent.')
super(PollsterListForbidden, self).__init__(msg)
class Resources(object):
def __init__(self, agent_manager):
self.agent_manager = agent_manager
self._resources = []
self._discovery = []
self.blacklist = []
self.last_dup = []
def setup(self, pipeline):
self._resources = pipeline.resources
self._discovery = pipeline.discovery
def get(self, discovery_cache=None):
source_discovery = (self.agent_manager.discover(self._discovery,
discovery_cache)
if self._discovery else [])
static_resources = []
if self._resources:
static_resources_group = self.agent_manager.construct_group_id(
utils.hash_of_set(self._resources))
p_coord = self.agent_manager.partition_coordinator
static_resources = p_coord.extract_my_subset(
static_resources_group, self._resources)
return static_resources + source_discovery
@staticmethod
def key(source_name, pollster):
return '%s-%s' % (source_name, pollster.name)
class PollingTask(object):
"""Polling task for polling samples and inject into pipeline.
A polling task can be invoked periodically or only once.
"""
def __init__(self, agent_manager):
self.manager = agent_manager
# elements of the Cartesian product of sources X pollsters
# with a common interval
self.pollster_matches = collections.defaultdict(set)
# per-sink publisher contexts associated with each source
self.publishers = {}
# we relate the static resources and per-source discovery to
# each combination of pollster and matching source
resource_factory = lambda: Resources(agent_manager)
self.resources = collections.defaultdict(resource_factory)
def add(self, pollster, pipeline):
if pipeline.source.name not in self.publishers:
publish_context = publish_pipeline.PublishContext(
self.manager.context)
self.publishers[pipeline.source.name] = publish_context
self.publishers[pipeline.source.name].add_pipelines([pipeline])
self.pollster_matches[pipeline.source.name].add(pollster)
key = Resources.key(pipeline.source.name, pollster)
self.resources[key].setup(pipeline)
def poll_and_publish(self):
"""Polling sample and publish into pipeline."""
cache = {}
discovery_cache = {}
for source_name in self.pollster_matches:
with self.publishers[source_name] as publisher:
for pollster in self.pollster_matches[source_name]:
LOG.info(_("Polling pollster %(poll)s in the context of "
"%(src)s"),
dict(poll=pollster.name, src=source_name))
key = Resources.key(source_name, pollster)
candidate_res = list(
self.resources[key].get(discovery_cache))
if not candidate_res and pollster.obj.default_discovery:
candidate_res = self.manager.discover(
[pollster.obj.default_discovery], discovery_cache)
# Remove duplicated resources and blacklisted resources. Using
# set() would require a well-defined __hash__ for each resource.
# Since __eq__ is defined, 'not in' is safe here.
seen = []
duplicated = []
polling_resources = []
black_res = self.resources[key].blacklist
for x in candidate_res:
if x not in seen:
seen.append(x)
if x not in black_res:
polling_resources.append(x)
else:
duplicated.append(x)
# Warn about duplicated resources only the first time they are seen
if self.resources[key].last_dup != duplicated:
self.resources[key].last_dup = duplicated
LOG.warning(_(
'Found the following duplicated resources for '
'%(name)s in the context of %(source)s: %(list)s. '
'Check the pipeline configuration.')
% ({'name': pollster.name,
'source': source_name,
'list': duplicated
}))
# If no resources, skip for this pollster
if not polling_resources:
LOG.info(_("Skip polling pollster %s, no resources"
" found"), pollster.name)
continue
try:
samples = list(pollster.obj.get_samples(
manager=self.manager,
cache=cache,
resources=polling_resources
))
publisher(samples)
except plugin_base.PollsterPermanentError as err:
LOG.error(_(
'Pollster %(name)s is prevented from polling '
'source %(source)s from now on!')
% ({'name': pollster.name, 'source': source_name}))
self.resources[key].blacklist.append(err.fail_res)
except Exception as err:
LOG.warning(_(
'Continue after error from %(name)s: %(error)s')
% ({'name': pollster.name, 'error': err}),
exc_info=True)
class AgentManager(os_service.Service):
def __init__(self, namespaces, pollster_list, group_prefix=None):
# the coordination and pollster-list features are mutually
# exclusive and cannot be used at the same time, to avoid
# samples being duplicated or lost
if pollster_list and cfg.CONF.coordination.backend_url:
raise PollsterListForbidden()
super(AgentManager, self).__init__()
def _match(pollster):
"""Find out if pollster name matches to one of the list."""
return any(fnmatch.fnmatch(pollster.name, pattern) for
pattern in pollster_list)
if type(namespaces) is not list:
namespaces = [namespaces]
# default to ['compute', 'central'] if no namespaces are passed
extensions = (self._extensions('poll', namespace).extensions
for namespace in namespaces)
if pollster_list:
extensions = (moves.filter(_match, exts)
for exts in extensions)
self.extensions = list(itertools.chain(*list(extensions)))
self.discovery_manager = self._extensions('discover')
self.context = context.RequestContext('admin', 'admin', is_admin=True)
self.partition_coordinator = coordination.PartitionCoordinator()
# Compose the coordination group prefix.
# We'll use the namespaces as the basis for this partitioning.
namespace_prefix = '-'.join(sorted(namespaces))
self.group_prefix = ('%s-%s' % (namespace_prefix, group_prefix)
if group_prefix else namespace_prefix)
@staticmethod
def _extensions(category, agent_ns=None):
namespace = ('ceilometer.%s.%s' % (category, agent_ns) if agent_ns
else 'ceilometer.%s' % category)
def _catch_extension_load_error(mgr, ep, exc):
# Extensions raising ExtensionLoadError can be ignored, and
# anything we cannot import is skipped as a safety measure.
if isinstance(exc, plugin_base.ExtensionLoadError):
LOG.error(_("Skip loading extension for %s") % ep.name)
return
if isinstance(exc, ImportError):
LOG.error(
_("Failed to import extension for %(name)s: %(error)s"),
{'name': ep.name, 'error': exc},
)
return
raise exc
return extension.ExtensionManager(
namespace=namespace,
invoke_on_load=True,
on_load_failure_callback=_catch_extension_load_error,
)
def join_partitioning_groups(self):
groups = set([self.construct_group_id(d.obj.group_id)
for d in self.discovery_manager])
# let each set of statically-defined resources have its own group
static_resource_groups = set([
self.construct_group_id(utils.hash_of_set(p.resources))
for p in self.pipeline_manager.pipelines
if p.resources
])
groups.update(static_resource_groups)
for group in groups:
self.partition_coordinator.join_group(group)
def create_polling_task(self):
"""Create an initially empty polling task."""
return PollingTask(self)
def setup_polling_tasks(self):
polling_tasks = {}
for pipeline in self.pipeline_manager.pipelines:
for pollster in self.extensions:
if pipeline.support_meter(pollster.name):
polling_task = polling_tasks.get(pipeline.get_interval())
if not polling_task:
polling_task = self.create_polling_task()
polling_tasks[pipeline.get_interval()] = polling_task
polling_task.add(pollster, pipeline)
return polling_tasks
def construct_group_id(self, discovery_group_id):
return ('%s-%s' % (self.group_prefix,
discovery_group_id)
if discovery_group_id else None)
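# Illustrative examples (assumed values): with a group_prefix of
# 'central', construct_group_id('local_instances') returns
# 'central-local_instances'; a falsy discovery_group_id returns None.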
def start(self):
self.pipeline_manager = publish_pipeline.setup_pipeline()
self.partition_coordinator.start()
self.join_partitioning_groups()
# allow time for coordination if necessary
delay_start = self.partition_coordinator.is_active()
# set shuffle time before polling task if necessary
delay_polling_time = random.randint(
0, cfg.CONF.shuffle_time_before_polling_task)
for interval, task in six.iteritems(self.setup_polling_tasks()):
delay_time = (interval + delay_polling_time if delay_start
else delay_polling_time)
self.tg.add_timer(interval,
self.interval_task,
initial_delay=delay_time,
task=task)
self.tg.add_timer(cfg.CONF.coordination.heartbeat,
self.partition_coordinator.heartbeat)
def stop(self):
if self.partition_coordinator:
self.partition_coordinator.stop()
super(AgentManager, self).stop()
@staticmethod
def interval_task(task):
task.poll_and_publish()
@staticmethod
def _parse_discoverer(url):
s = urlparse.urlparse(url)
return (s.scheme or s.path), (s.netloc + s.path if s.scheme else None)
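# Illustrative examples (assumed URLs): 'endpoint://object-store'
# parses to ('endpoint', 'object-store'), while a bare name such as
# 'local_node' parses to ('local_node', None).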
def _discoverer(self, name):
for d in self.discovery_manager:
if d.name == name:
return d.obj
return None
def discover(self, discovery=None, discovery_cache=None):
resources = []
discovery = discovery or []
for url in discovery:
if discovery_cache is not None and url in discovery_cache:
resources.extend(discovery_cache[url])
continue
name, param = self._parse_discoverer(url)
discoverer = self._discoverer(name)
if discoverer:
try:
discovered = discoverer.discover(self, param)
partitioned = self.partition_coordinator.extract_my_subset(
self.construct_group_id(discoverer.group_id),
discovered)
resources.extend(partitioned)
if discovery_cache is not None:
discovery_cache[url] = partitioned
except Exception as err:
LOG.exception(_('Unable to discover resources: %s') % err)
else:
LOG.warning(_('Unknown discovery extension: %s') % name)
return resources
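As an aside on the de-duplication loop in poll_and_publish above, the following minimal standalone sketch (with made-up resource names) shows the order-preserving, __eq__-only filtering it performs:

candidates = ['res-a', 'res-b', 'res-a', 'res-c']
blacklist = ['res-c']
seen, duplicated, polling = [], [], []
for res in candidates:
    if res not in seen:
        # first occurrence: remember it, and poll it unless blacklisted
        seen.append(res)
        if res not in blacklist:
            polling.append(res)
    else:
        # repeat occurrence: record it for the duplicate warning
        duplicated.append(res)
assert polling == ['res-a', 'res-b']
assert duplicated == ['res-a']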


@ -1,45 +0,0 @@
# Copyright 2014-2015 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from ceilometer.agent import plugin_base as plugin
from ceilometer.i18n import _LW
LOG = log.getLogger(__name__)
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
class EndpointDiscovery(plugin.DiscoveryBase):
"""Discovery that supplies service endpoints.
This discovery should be used when the relevant APIs are not well suited
to dividing the pollster's work into smaller pieces than a whole service
at once. An example is the floating_ip pollster, which calls
nova.floating_ips.list() and therefore gets all floating IPs at once.
"""
@staticmethod
def discover(manager, param=None):
endpoints = manager.keystone.service_catalog.get_urls(
service_type=param,
endpoint_type=cfg.CONF.service_credentials.os_endpoint_type,
region_name=cfg.CONF.service_credentials.os_region_name)
if not endpoints:
LOG.warning(_LW('No endpoints found for service %s'),
"<all services>" if param is None else param)
return []
return endpoints
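# Illustrative behaviour (assumed service name): discover(manager,
# 'object-store') returns the catalog URLs for that service in the
# configured region, or [] with a warning when none are registered.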


@ -1,21 +0,0 @@
# Copyright 2015 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.agent import plugin_base
class LocalNodeDiscovery(plugin_base.DiscoveryBase):
def discover(self, manager, param=None):
"""Return local node as resource."""
return ['local_host']


@ -1,35 +0,0 @@
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from ceilometer.agent import plugin_base as plugin
LOG = log.getLogger(__name__)
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
class TenantDiscovery(plugin.DiscoveryBase):
"""Discovery that supplies keystone tenants.
This discovery should be used when the pollster's work can't be divided
into smaller pieces than per-tenant. An example is the Swift
pollster, which polls account details and does so per-tenant.
"""
def discover(self, manager, param=None):
tenants = manager.keystone.tenants.list()
return tenants or []


@ -1,52 +0,0 @@
#
# Copyright 2012-2013 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from ceilometer.agent import base
from ceilometer import keystone_client
OPTS = [
cfg.StrOpt('partitioning_group_prefix',
default=None,
deprecated_group='central',
help='Work-load partitioning group prefix. Use only if you '
'want to run multiple polling agents with different '
'config files. For each sub-group of the agent '
'pool with the same partitioning_group_prefix a disjoint '
'subset of pollsters should be loaded.'),
]
cfg.CONF.register_opts(OPTS, group='polling')
LOG = log.getLogger(__name__)
class AgentManager(base.AgentManager):
def __init__(self, namespaces=None, pollster_list=None):
namespaces = namespaces or ['compute', 'central']
pollster_list = pollster_list or []
super(AgentManager, self).__init__(
namespaces, pollster_list,
group_prefix=cfg.CONF.polling.partitioning_group_prefix)
def interval_task(self, task):
try:
self.keystone = keystone_client.get_client()
except Exception as e:
self.keystone = e
super(AgentManager, self).interval_task(task)


@ -1,271 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for plugins.
"""
import abc
import collections
from keystoneclient.v2_0 import client as ksclient
from oslo_config import cfg
from oslo_context import context
from oslo_log import log
import oslo_messaging
import six
from ceilometer.i18n import _
from ceilometer import messaging
cfg.CONF.import_group('service_credentials', 'ceilometer.service')
LOG = log.getLogger(__name__)
ExchangeTopics = collections.namedtuple('ExchangeTopics',
['exchange', 'topics'])
def _get_keystone():
try:
return ksclient.Client(
username=cfg.CONF.service_credentials.os_username,
password=cfg.CONF.service_credentials.os_password,
tenant_id=cfg.CONF.service_credentials.os_tenant_id,
tenant_name=cfg.CONF.service_credentials.os_tenant_name,
cacert=cfg.CONF.service_credentials.os_cacert,
auth_url=cfg.CONF.service_credentials.os_auth_url,
region_name=cfg.CONF.service_credentials.os_region_name,
insecure=cfg.CONF.service_credentials.insecure)
except Exception as e:
return e
def check_keystone(service_type=None):
"""Decorator function to check if manager has valid keystone client.
Also checks if the service is registered/enabled in Keystone.
:param service_type: name of service in Keystone
"""
def wrapped(f):
def func(self, *args, **kwargs):
manager = kwargs.get('manager')
if not manager and len(args) > 0:
manager = args[0]
keystone = getattr(manager, 'keystone', None)
if not keystone:
keystone = _get_keystone()
if isinstance(keystone, Exception):
LOG.error(_('Skip due to keystone error %s'),
keystone if keystone else '')
return iter([])
elif service_type:
endpoints = keystone.service_catalog.get_endpoints(
service_type=service_type)
if not endpoints:
LOG.warning(_('Skipping because %s service is not '
'registered in keystone') % service_type)
return iter([])
return f(self, *args, **kwargs)
return func
return wrapped
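# Hypothetical usage sketch (class and service names assumed): guard a
# pollster's get_samples so it yields nothing when keystone is
# unavailable or the service is absent from the catalog:
#
#     class SwiftPollster(PollsterBase):
#         @check_keystone('object-store')
#         def get_samples(self, manager, cache, resources):
#             ...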
class PluginBase(object):
"""Base class for all plugins."""
@six.add_metaclass(abc.ABCMeta)
class NotificationBase(PluginBase):
"""Base class for plugins that support the notification API."""
def __init__(self, manager):
super(NotificationBase, self).__init__()
# NOTE(gordc): this is the filter rule used by oslo.messaging to
# dispatch messages to an endpoint.
self.filter_rule = oslo_messaging.NotificationFilter(
event_type='|'.join(self.event_types))
self.manager = manager
@abc.abstractproperty
def event_types(self):
"""Return a sequence of strings.
Strings are defining the event types to be given to this plugin.
"""
@abc.abstractmethod
def get_targets(self, conf):
"""Return a sequence of oslo.messaging.Target.
Sequence is defining the exchange and topics to be connected for this
plugin.
:param conf: Configuration.
"""
@abc.abstractmethod
def process_notification(self, message):
"""Return a sequence of Counter instances for the given message.
:param message: Message to process.
"""
def info(self, ctxt, publisher_id, event_type, payload, metadata):
"""RPC endpoint for notification messages
When another service sends a notification over the message
bus, this method receives it.
:param ctxt: oslo.messaging context
:param publisher_id: publisher of the notification
:param event_type: type of notification
:param payload: notification payload
:param metadata: metadata about the notification
"""
notification = messaging.convert_to_old_notification_format(
'info', ctxt, publisher_id, event_type, payload, metadata)
self.to_samples_and_publish(context.get_admin_context(), notification)
def to_samples_and_publish(self, context, notification):
"""Return samples produced by *process_notification*.
Samples produced for the given notification.
:param context: Execution context from the service or RPC call
:param notification: The notification to process.
"""
with self.manager.publisher(context) as p:
p(list(self.process_notification(notification)))
class NonMetricNotificationBase(object):
"""Use to mark non-measurement meters
There are a number of historical non-measurement meters that should really
be captured as events. This common base allows us to disable these invalid
meters.
"""
pass
class ExtensionLoadError(Exception):
"""Error of loading pollster plugin.
PollsterBase provides a hook, setup_environment, called in pollster loading
to setup required HW/SW dependency. Any exception from it would be
propagated as ExtensionLoadError, then skip loading this pollster.
"""
pass
class PollsterPermanentError(Exception):
"""Permenant error when polling.
When unrecoverable error happened in polling, pollster can raise this
exception with failed resource to prevent itself from polling any more.
Resource is one of parameter resources from get_samples that cause polling
error.
"""
def __init__(self, resource):
self.fail_res = resource
@six.add_metaclass(abc.ABCMeta)
class PollsterBase(PluginBase):
"""Base class for plugins that support the polling API."""
def setup_environment(self):
"""Setup required environment for pollster.
Each subclass could overwrite it for specific usage. Any exception
raised in this function would prevent pollster being loaded.
"""
pass
def __init__(self):
super(PollsterBase, self).__init__()
try:
self.setup_environment()
except Exception as err:
raise ExtensionLoadError(err)
@abc.abstractproperty
def default_discovery(self):
"""Default discovery to use for this pollster.
There are three ways a pollster can get a list of resources to poll,
listed here in ascending order of precedence:
1. from the per-agent discovery,
2. from the per-pollster discovery (defined here),
3. from the per-pipeline configured discovery and/or per-pipeline
configured static resources.
If a pollster should only get resources from #1 or #3, this property
should be set to None.
"""
@abc.abstractmethod
def get_samples(self, manager, cache, resources):
"""Return a sequence of Counter instances from polling the resources.
:param manager: The service manager class invoking the plugin.
:param cache: A dictionary to allow pollsters to pass data
between themselves when recomputing it would be
expensive (e.g., asking another service for a
list of objects).
:param resources: A list of resources the pollster will get data
from. It's up to the specific pollster to decide
how to use it. It is usually supplied by a discovery,
see ``default_discovery`` for more information.
"""
@six.add_metaclass(abc.ABCMeta)
class DiscoveryBase(object):
@abc.abstractmethod
def discover(self, manager, param=None):
"""Discover resources to monitor.
The most fine-grained discovery should be preferred, so that the work
is distributed as evenly as possible among multiple agents (if any).
For example:
if the pollster can separately poll individual resources, it should
have its own discovery implementation to discover those resources. If
it can only poll per-tenant, then the `TenantDiscovery` should be
used. If even that is not possible, use `EndpointDiscovery` (see
their respective docstrings).
:param manager: The service manager class invoking the plugin.
:param param: an optional parameter to guide the discovery
"""
@property
def group_id(self):
"""Return group id of this discovery.
All running discoveries with the same group_id should return the same
set of resources at a given point in time. By default, a discovery is
put into a global group, meaning that all discoveries of its type
running anywhere in the cloud return the same set of resources.
This property can be overridden to provide correct grouping of
localized discoveries. For example, compute discovery is localized
to a host, which is reflected in its group_id.
A None value signifies that this discovery does not want to be part
of workload partitioning at all.
"""
return 'global'
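To make the pollster contract above concrete, here is a minimal hypothetical sketch (names assumed; a real pollster yields ceilometer.sample.Sample objects rather than plain dicts):

from ceilometer.agent import plugin_base

class ExamplePollster(plugin_base.PollsterBase):
    """Hypothetical pollster illustrating the contract described above."""

    @property
    def default_discovery(self):
        # used only when neither the per-agent nor the per-pipeline
        # configuration supplies resources for this pollster
        return 'local_node'

    def get_samples(self, manager, cache, resources):
        # the cache dict is shared between pollsters in one polling cycle
        for resource in resources:
            yield {'name': 'example.metric', 'resource_id': resource}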


@ -26,7 +26,6 @@ import pymongo
from ceilometer.alarm.storage import pymongo_base
from ceilometer import storage
from ceilometer.storage import impl_mongodb
from ceilometer.storage.mongo import utils as pymongo_utils
cfg.CONF.import_opt('alarm_history_time_to_live', 'ceilometer.alarm.storage',
@ -63,11 +62,36 @@ class Connection(pymongo_base.Connection):
# needed.
self.upgrade()
@staticmethod
def update_ttl(ttl, ttl_index_name, index_field, coll):
"""Update or ensure time_to_live indexes.
:param ttl: time to live in seconds.
:param ttl_index_name: name of the index we want to update or ensure.
:param index_field: field with the index that we need to update.
:param coll: collection whose indexes need to be updated.
"""
indexes = coll.index_information()
if ttl <= 0:
if ttl_index_name in indexes:
coll.drop_index(ttl_index_name)
return
if ttl_index_name in indexes:
return coll.database.command(
'collMod', coll.name,
index={'keyPattern': {index_field: pymongo.ASCENDING},
'expireAfterSeconds': ttl})
coll.ensure_index([(index_field, pymongo.ASCENDING)],
expireAfterSeconds=ttl,
name=ttl_index_name)
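# Illustrative behaviour (assumed arguments): update_ttl(0,
# 'alarm_history_ttl', 'timestamp', coll) drops the index, while a
# positive ttl either updates an existing index in place via 'collMod'
# or creates it with expireAfterSeconds=ttl.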
def upgrade(self):
super(Connection, self).upgrade()
# Establish indexes
ttl = cfg.CONF.database.alarm_history_time_to_live
impl_mongodb.Connection.update_ttl(
self.update_ttl(
ttl, 'alarm_history_ttl', 'timestamp', self.db.alarm_history)
def clear(self):


@ -14,7 +14,6 @@
from __future__ import absolute_import
import datetime
import os
from oslo_config import cfg
from oslo_db.sqlalchemy import session as db_session
@ -45,32 +44,7 @@ AVAILABLE_STORAGE_CAPABILITIES = {
class Connection(base.Connection):
"""Put the data into a SQLAlchemy database.
Tables::
- meter
- meter definition
- { id: meter def id
name: meter name
type: meter type
unit: meter unit
}
- sample
- the raw incoming data
- { id: sample id
meter_id: meter id (->meter.id)
user_id: user uuid
project_id: project uuid
resource_id: resource uuid
source_id: source id
resource_metadata: metadata dictionaries
volume: sample volume
timestamp: datetime
message_signature: message signature
message_id: message uuid
}
"""
"""Put the data into a SQLAlchemy database. """
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
@ -88,12 +62,8 @@ class Connection(base.Connection):
self._engine_facade = db_session.EngineFacade(url, **options)
def upgrade(self):
# NOTE(gordc): to minimise memory, only import migration when needed
from oslo_db.sqlalchemy import migration
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'..', '..', 'storage', 'sqlalchemy',
'migrate_repo')
migration.db_sync(self._engine_facade.get_engine(), path)
engine = self._engine_facade.get_engine()
models.Base.metadata.create_all(engine)
def clear(self):
engine = self._engine_facade.get_engine()


@ -28,6 +28,8 @@ from ceilometer.api import middleware
from ceilometer.i18n import _
from ceilometer.i18n import _LW
from ceilometer import service
from ceilometer import storage
LOG = log.getLogger(__name__)
@ -61,8 +63,8 @@ def get_pecan_config():
def setup_app(pecan_config=None, extra_hooks=None):
# FIXME: Replace DBHook with a hooks.TransactionHook
app_hooks = [hooks.ConfigHook(),
hooks.DBHook(),
hooks.NotifierHook(),
hooks.DBHook(
storage.get_connection_from_config(cfg.CONF, 'alarm'),),
hooks.TranslationHook()]
if extra_hooks:
app_hooks.extend(extra_hooks)


@ -22,7 +22,6 @@ import ast
import datetime
import functools
import inspect
import json
from oslo_utils import strutils
from oslo_utils import timeutils
@ -108,24 +107,6 @@ class Base(wtypes.DynamicBase):
getattr(self, k) != wsme.Unset)
class Link(Base):
"""A link representation."""
href = wtypes.text
"The url of a link"
rel = wtypes.text
"The name of a link"
@classmethod
def sample(cls):
return cls(href=('http://localhost:8777/v2/meters/volume?'
'q.field=resource_id&'
'q.value=bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'),
rel='volume'
)
class Query(Base):
"""Query filter."""
@ -258,16 +239,3 @@ class AlarmRule(Base):
@staticmethod
def update_hook(alarm):
pass
class JsonType(wtypes.UserType):
"""A simple JSON type."""
basetype = wtypes.text
name = 'json'
@staticmethod
def validate(value):
# check that value can be serialised
json.dumps(value)
return value


@ -39,52 +39,20 @@ class Capabilities(base.Base):
api = {wtypes.text: bool}
"A flattened dictionary of API capabilities"
storage = {wtypes.text: bool}
"A flattened dictionary of storage capabilities"
alarm_storage = {wtypes.text: bool}
"A flattened dictionary of alarm storage capabilities"
event_storage = {wtypes.text: bool}
"A flattened dictionary of event storage capabilities"
@classmethod
def sample(cls):
return cls(
api=_flatten_capabilities({
'meters': {'query': {'simple': True,
'metadata': True,
'complex': False}},
'resources': {'query': {'simple': True,
'metadata': True,
'complex': False}},
'samples': {'query': {'simple': True,
'metadata': True,
'complex': True}},
'statistics': {'groupby': True,
'query': {'simple': True,
'metadata': True,
'complex': False},
'aggregation': {'standard': True,
'selectable': {
'max': True,
'min': True,
'sum': True,
'avg': True,
'count': True,
'stddev': True,
'cardinality': True,
'quartile': False}}},
'alarms': {'query': {'simple': True,
'complex': True},
'history': {'query': {'simple': True,
'complex': True}}},
'events': {'query': {'simple': True}},
}),
storage=_flatten_capabilities(
{'storage': {'production_ready': True}}),
alarm_storage=_flatten_capabilities(
{'storage': {'production_ready': True}}),
event_storage=_flatten_capabilities(
{'storage': {'production_ready': True}}),
)
@ -99,18 +67,11 @@ class CapabilitiesController(rest.RestController):
"""
# variation in API capabilities is effectively determined by
# the lack of strict feature parity across storage drivers
conn = pecan.request.storage_conn
alarm_conn = pecan.request.alarm_storage_conn
event_conn = pecan.request.event_storage_conn
driver_capabilities = conn.get_capabilities().copy()
driver_capabilities['alarms'] = alarm_conn.get_capabilities()['alarms']
driver_capabilities['events'] = event_conn.get_capabilities()['events']
driver_perf = conn.get_storage_capabilities()
driver_capabilities = {
'alarms': alarm_conn.get_capabilities()['alarms'],
}
alarm_driver_perf = alarm_conn.get_storage_capabilities()
event_driver_perf = event_conn.get_storage_capabilities()
return Capabilities(api=_flatten_capabilities(driver_capabilities),
storage=_flatten_capabilities(driver_perf),
alarm_storage=_flatten_capabilities(
alarm_driver_perf),
event_storage=_flatten_capabilities(
event_driver_perf))
alarm_driver_perf))


@ -1,278 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from oslo_log import log
import pecan
from pecan import rest
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import utils as v2_utils
from ceilometer.event.storage import models as event_models
from ceilometer.i18n import _
from ceilometer import storage
LOG = log.getLogger(__name__)
class TraitDescription(base.Base):
"""A description of a trait, with no associated value."""
type = wtypes.text
"the data type, defaults to string"
name = wtypes.text
"the name of the trait"
@classmethod
def sample(cls):
return cls(name='service',
type='string'
)
class EventQuery(base.Query):
"""Query arguments for Event Queries."""
_supported_types = ['integer', 'float', 'string', 'datetime']
type = wsme.wsattr(wtypes.text, default='string')
"the type of the trait filter, defaults to string"
def __repr__(self):
# for logging calls
return '<EventQuery %r %s %r %s>' % (self.field,
self.op,
self._get_value_as_type(),
self.type)
@classmethod
def sample(cls):
return cls(field="event_type",
type="string",
op="eq",
value="compute.instance.create.start")
class Trait(base.Base):
"""A Trait associated with an event."""
name = wtypes.text
"The name of the trait"
value = wtypes.text
"the value of the trait"
type = wtypes.text
"the type of the trait (string, integer, float or datetime)"
@staticmethod
def _convert_storage_trait(trait):
"""Helper method to convert a storage model into an API trait instance.
If an API trait instance is passed in, just return it.
"""
if isinstance(trait, Trait):
return trait
value = (six.text_type(trait.value)
if not trait.dtype == event_models.Trait.DATETIME_TYPE
else trait.value.isoformat())
trait_type = event_models.Trait.get_name_by_type(trait.dtype)
return Trait(name=trait.name, type=trait_type, value=value)
@classmethod
def sample(cls):
return cls(name='service',
type='string',
value='compute.hostname'
)
class Event(base.Base):
"""A System event."""
message_id = wtypes.text
"The message ID for the notification"
event_type = wtypes.text
"The type of the event"
_traits = None
def get_traits(self):
return self._traits
def set_traits(self, traits):
self._traits = map(Trait._convert_storage_trait, traits)
traits = wsme.wsproperty(wtypes.ArrayType(Trait),
get_traits,
set_traits)
"Event specific properties"
generated = datetime.datetime
"The time the event occurred"
raw = base.JsonType()
"The raw copy of notification"
@classmethod
def sample(cls):
return cls(
event_type='compute.instance.update',
generated=datetime.datetime(2015, 1, 1, 12, 30, 59, 123456),
message_id='94834db1-8f1b-404d-b2ec-c35901f1b7f0',
traits={
Trait(name='request_id',
value='req-4e2d67b8-31a4-48af-bb2f-9df72a353a72'),
Trait(name='service',
value='conductor.tem-devstack-01'),
Trait(name='tenant_id',
value='7f13f2b17917463b9ee21aa92c4b36d6')
},
raw={'status': {'nested': 'started'}}
)
def _event_query_to_event_filter(q):
evt_model_filter = {
'event_type': None,
'message_id': None,
'start_timestamp': None,
'end_timestamp': None
}
traits_filter = []
for i in q:
if not i.op:
i.op = 'eq'
elif i.op not in base.operation_kind:
error = _("operator {} is incorrect").format(i.op)
raise base.ClientSideError(error)
if i.field in evt_model_filter:
evt_model_filter[i.field] = i.value
else:
trait_type = i.type or 'string'
traits_filter.append({"key": i.field,
trait_type: i._get_value_as_type(),
"op": i.op})
return storage.EventFilter(traits_filter=traits_filter, **evt_model_filter)
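# Illustrative conversion (assumed values): a query item
# (field='event_type', op='eq', value='compute.instance.create.start')
# fills evt_model_filter['event_type'], while (field='service',
# type='string', op='eq', value='nova') becomes the traits filter
# {'key': 'service', 'string': 'nova', 'op': 'eq'}.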
class TraitsController(rest.RestController):
"""Works on Event Traits."""
@v2_utils.requires_admin
@wsme_pecan.wsexpose([Trait], wtypes.text, wtypes.text)
def get_one(self, event_type, trait_name):
"""Return all instances of a trait for an event type.
:param event_type: Event type to filter traits by
:param trait_name: Trait to return values for
"""
LOG.debug(_("Getting traits for %s") % event_type)
return [Trait._convert_storage_trait(t)
for t in pecan.request.event_storage_conn
.get_traits(event_type, trait_name)]
@v2_utils.requires_admin
@wsme_pecan.wsexpose([TraitDescription], wtypes.text)
def get_all(self, event_type):
"""Return all trait names for an event type.
:param event_type: Event type to filter traits by
"""
get_trait_name = event_models.Trait.get_name_by_type
return [TraitDescription(name=t['name'],
type=get_trait_name(t['data_type']))
for t in pecan.request.event_storage_conn
.get_trait_types(event_type)]
class EventTypesController(rest.RestController):
"""Works on Event Types in the system."""
traits = TraitsController()
@v2_utils.requires_admin
@wsme_pecan.wsexpose(None, wtypes.text)
def get_one(self, event_type):
"""Unused API, will always return 404.
:param event_type: An event type
"""
pecan.abort(404)
@v2_utils.requires_admin
@wsme_pecan.wsexpose([six.text_type])
def get_all(self):
"""Get all event types."""
return list(pecan.request.event_storage_conn.get_event_types())
class EventsController(rest.RestController):
"""Works on Events."""
@v2_utils.requires_admin
@wsme_pecan.wsexpose([Event], [EventQuery])
def get_all(self, q=None):
"""Return all events matching the query filters.
:param q: Filter arguments for which Events to return
"""
q = q or []
event_filter = _event_query_to_event_filter(q)
return [Event(message_id=event.message_id,
event_type=event.event_type,
generated=event.generated,
traits=event.traits,
raw=event.raw)
for event in
pecan.request.event_storage_conn.get_events(event_filter)]
@v2_utils.requires_admin
@wsme_pecan.wsexpose(Event, wtypes.text)
def get_one(self, message_id):
"""Return a single event with the given message id.
:param message_id: Message ID of the Event to be returned
"""
event_filter = storage.EventFilter(message_id=message_id)
events = [event for event
in pecan.request.event_storage_conn.get_events(event_filter)]
if not events:
raise base.EntityNotFound(_("Event"), message_id)
if len(events) > 1:
LOG.error(_("More than one event with "
"id %s returned from storage driver") % message_id)
event = events[0]
return Event(message_id=event.message_id,
event_type=event.event_type,
generated=event.generated,
traits=event.traits,
raw=event.raw)


@ -1,495 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import base64
import datetime
from oslo_config import cfg
from oslo_context import context
from oslo_log import log
from oslo_utils import strutils
from oslo_utils import timeutils
import pecan
from pecan import rest
import six
import wsme
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import utils as v2_utils
from ceilometer.api import rbac
from ceilometer.i18n import _
from ceilometer.publisher import utils as publisher_utils
from ceilometer import sample
from ceilometer import storage
from ceilometer import utils
LOG = log.getLogger(__name__)
class OldSample(base.Base):
"""A single measurement for a given meter and resource.
This class is deprecated in favor of Sample.
"""
source = wtypes.text
"The ID of the source that identifies where the sample comes from"
counter_name = wsme.wsattr(wtypes.text, mandatory=True)
"The name of the meter"
# FIXME(dhellmann): Make this meter_name?
counter_type = wsme.wsattr(wtypes.text, mandatory=True)
"The type of the meter (see :ref:`measurements`)"
# FIXME(dhellmann): Make this meter_type?
counter_unit = wsme.wsattr(wtypes.text, mandatory=True)
"The unit of measure for the value in counter_volume"
# FIXME(dhellmann): Make this meter_unit?
counter_volume = wsme.wsattr(float, mandatory=True)
"The actual measured value"
user_id = wtypes.text
"The ID of the user who last triggered an update to the resource"
project_id = wtypes.text
"The ID of the project or tenant that owns the resource"
resource_id = wsme.wsattr(wtypes.text, mandatory=True)
"The ID of the :class:`Resource` for which the measurements are taken"
timestamp = datetime.datetime
"UTC date and time when the measurement was made"
recorded_at = datetime.datetime
"When the sample has been recorded."
resource_metadata = {wtypes.text: wtypes.text}
"Arbitrary metadata associated with the resource"
message_id = wtypes.text
"A unique identifier for the sample"
def __init__(self, counter_volume=None, resource_metadata=None,
timestamp=None, **kwds):
resource_metadata = resource_metadata or {}
if counter_volume is not None:
counter_volume = float(counter_volume)
resource_metadata = v2_utils.flatten_metadata(resource_metadata)
# this is to make it easier for clients to pass a timestamp in
if timestamp and isinstance(timestamp, six.string_types):
timestamp = timeutils.parse_isotime(timestamp)
super(OldSample, self).__init__(counter_volume=counter_volume,
resource_metadata=resource_metadata,
timestamp=timestamp, **kwds)
if self.resource_metadata in (wtypes.Unset, None):
self.resource_metadata = {}
@classmethod
def sample(cls):
return cls(source='openstack',
counter_name='instance',
counter_type='gauge',
counter_unit='instance',
counter_volume=1,
resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
recorded_at=datetime.datetime.utcnow(),
timestamp=datetime.datetime.utcnow(),
resource_metadata={'name1': 'value1',
'name2': 'value2'},
message_id='5460acce-4fd6-480d-ab18-9735ec7b1996',
)
class Statistics(base.Base):
"""Computed statistics for a query."""
groupby = {wtypes.text: wtypes.text}
"Dictionary of field names for group, if groupby statistics are requested"
unit = wtypes.text
"The unit type of the data set"
min = float
"The minimum volume seen in the data"
max = float
"The maximum volume seen in the data"
avg = float
"The average of all of the volume values seen in the data"
sum = float
"The total of all of the volume values seen in the data"
count = int
"The number of samples seen"
aggregate = {wtypes.text: float}
"The selectable aggregate value(s)"
duration = float
"The difference, in seconds, between the oldest and newest timestamp"
duration_start = datetime.datetime
"UTC date and time of the earliest timestamp, or the query start time"
duration_end = datetime.datetime
"UTC date and time of the oldest timestamp, or the query end time"
period = int
"The difference, in seconds, between the period start and end"
period_start = datetime.datetime
"UTC date and time of the period start"
period_end = datetime.datetime
"UTC date and time of the period end"
def __init__(self, start_timestamp=None, end_timestamp=None, **kwds):
super(Statistics, self).__init__(**kwds)
self._update_duration(start_timestamp, end_timestamp)
def _update_duration(self, start_timestamp, end_timestamp):
# "Clamp" the timestamps we return to the original time
# range, excluding the offset.
if (start_timestamp and
self.duration_start and
self.duration_start < start_timestamp):
self.duration_start = start_timestamp
LOG.debug(_('clamping min timestamp to range'))
if (end_timestamp and
self.duration_end and
self.duration_end > end_timestamp):
self.duration_end = end_timestamp
LOG.debug(_('clamping max timestamp to range'))
# If we got valid timestamps back, compute a duration in seconds.
#
# If the min > max after clamping then we know the
# timestamps on the samples fell outside of the time
# range we care about for the query, so treat them as
# "invalid."
#
# If the timestamps are invalid, return None as a
# sentinel indicating that there is something "funny"
# about the range.
if (self.duration_start and
self.duration_end and
self.duration_start <= self.duration_end):
self.duration = timeutils.delta_seconds(self.duration_start,
self.duration_end)
else:
self.duration_start = self.duration_end = self.duration = None
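# Worked example (assumed values): for a query bounded to [12:00, 13:00]
# whose samples span 11:50..13:10, duration_start is clamped up to 12:00
# and duration_end down to 13:00, giving duration = 3600.0 seconds; if
# clamping leaves start > end, all three fields are reset to None.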
@classmethod
def sample(cls):
return cls(unit='GiB',
min=1,
max=9,
avg=4.5,
sum=45,
count=10,
duration_start=datetime.datetime(2013, 1, 4, 16, 42),
duration_end=datetime.datetime(2013, 1, 4, 16, 47),
period=7200,
period_start=datetime.datetime(2013, 1, 4, 16, 00),
period_end=datetime.datetime(2013, 1, 4, 18, 00),
)
class Aggregate(base.Base):
func = wsme.wsattr(wtypes.text, mandatory=True)
"The aggregation function name"
param = wsme.wsattr(wtypes.text, default=None)
"The paramter to the aggregation function"
def __init__(self, **kwargs):
super(Aggregate, self).__init__(**kwargs)
@staticmethod
def validate(aggregate):
return aggregate
@classmethod
def sample(cls):
return cls(func='cardinality',
param='resource_id')
def _validate_groupby_fields(groupby_fields):
"""Checks that the list of groupby fields from request is valid.
If all fields are valid, returns fields with duplicates removed.
"""
# NOTE(terriyu): Currently, metadata fields are supported in our
# group by statistics implementation only for mongodb
valid_fields = set(['user_id', 'resource_id', 'project_id', 'source',
'resource_metadata.instance_type'])
invalid_fields = set(groupby_fields) - valid_fields
if invalid_fields:
raise wsme.exc.UnknownArgument(invalid_fields,
"Invalid groupby fields")
# Remove duplicate fields
# NOTE(terriyu): This assumes that we don't care about the order of the
# group by fields.
return list(set(groupby_fields))
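# Illustrative behaviour (assumed values): ['user_id', 'project_id',
# 'user_id'] validates and de-duplicates to two fields (order not
# preserved), while ['flavor'] raises wsme.exc.UnknownArgument.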
class MeterController(rest.RestController):
"""Manages operations on a single meter."""
_custom_actions = {
'statistics': ['GET'],
}
def __init__(self, meter_name):
pecan.request.context['meter_name'] = meter_name
self.meter_name = meter_name
@wsme_pecan.wsexpose([OldSample], [base.Query], int)
def get_all(self, q=None, limit=None):
"""Return samples for the meter.
:param q: Filter rules for the data to be returned.
:param limit: Maximum number of samples to return.
"""
rbac.enforce('get_samples', pecan.request)
q = q or []
if limit and limit < 0:
raise base.ClientSideError(_("Limit must be positive"))
kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__)
kwargs['meter'] = self.meter_name
f = storage.SampleFilter(**kwargs)
return [OldSample.from_db_model(e)
for e in pecan.request.storage_conn.get_samples(f, limit=limit)
]
@wsme_pecan.wsexpose([OldSample], str, body=[OldSample], status_code=201)
def post(self, direct='', samples=None):
"""Post a list of new Samples to Telemetry.
:param direct: a flag indicating whether the samples will be posted
directly to storage or not.
:param samples: a list of samples within the request body.
"""
rbac.enforce('create_samples', pecan.request)
direct = strutils.bool_from_string(direct)
if not samples:
msg = _('Samples should be included in request body')
raise base.ClientSideError(msg)
now = timeutils.utcnow()
auth_project = rbac.get_limited_to_project(pecan.request.headers)
def_source = pecan.request.cfg.sample_source
def_project_id = pecan.request.headers.get('X-Project-Id')
def_user_id = pecan.request.headers.get('X-User-Id')
published_samples = []
for s in samples:
if self.meter_name != s.counter_name:
raise wsme.exc.InvalidInput('counter_name', s.counter_name,
'should be %s' % self.meter_name)
if s.message_id:
raise wsme.exc.InvalidInput('message_id', s.message_id,
'The message_id must not be set')
if s.counter_type not in sample.TYPES:
raise wsme.exc.InvalidInput('counter_type', s.counter_type,
'The counter type must be: ' +
', '.join(sample.TYPES))
s.user_id = (s.user_id or def_user_id)
s.project_id = (s.project_id or def_project_id)
s.source = '%s:%s' % (s.project_id, (s.source or def_source))
s.timestamp = (s.timestamp or now)
if auth_project and auth_project != s.project_id:
# non admin user trying to cross post to another project_id
auth_msg = 'cannot post samples to other projects'
raise wsme.exc.InvalidInput('project_id', s.project_id,
auth_msg)
published_sample = sample.Sample(
name=s.counter_name,
type=s.counter_type,
unit=s.counter_unit,
volume=s.counter_volume,
user_id=s.user_id,
project_id=s.project_id,
resource_id=s.resource_id,
timestamp=s.timestamp.isoformat(),
resource_metadata=utils.restore_nesting(s.resource_metadata,
separator='.'),
source=s.source)
s.message_id = published_sample.id
sample_dict = publisher_utils.meter_message_from_counter(
published_sample, cfg.CONF.publisher.telemetry_secret)
if direct:
ts = timeutils.parse_isotime(sample_dict['timestamp'])
sample_dict['timestamp'] = timeutils.normalize_time(ts)
pecan.request.storage_conn.record_metering_data(sample_dict)
else:
published_samples.append(sample_dict)
if not direct:
ctxt = context.RequestContext(user=def_user_id,
tenant=def_project_id,
is_admin=True)
notifier = pecan.request.notifier
notifier.info(ctxt, 'telemetry.api', published_samples)
return samples
@wsme_pecan.wsexpose([Statistics],
[base.Query], [six.text_type], int, [Aggregate])
def statistics(self, q=None, groupby=None, period=None, aggregate=None):
"""Computes the statistics of the samples in the time range given.
:param q: Filter rules for the data to be returned.
:param groupby: Fields for group by aggregation
:param period: Returned result will be an array of statistics for a
period of that many seconds.
:param aggregate: The selectable aggregation functions to be applied.
"""
rbac.enforce('compute_statistics', pecan.request)
q = q or []
groupby = groupby or []
aggregate = aggregate or []
if period and period < 0:
raise base.ClientSideError(_("Period must be positive."))
kwargs = v2_utils.query_to_kwargs(q, storage.SampleFilter.__init__)
kwargs['meter'] = self.meter_name
f = storage.SampleFilter(**kwargs)
g = _validate_groupby_fields(groupby)
aggregate = utils.uniq(aggregate, ['func', 'param'])
# Find the original timestamp in the query to use for clamping
# the duration returned in the statistics.
start = end = None
for i in q:
if i.field == 'timestamp' and i.op in ('lt', 'le'):
end = timeutils.parse_isotime(i.value).replace(
tzinfo=None)
elif i.field == 'timestamp' and i.op in ('gt', 'ge'):
start = timeutils.parse_isotime(i.value).replace(
tzinfo=None)
try:
computed = pecan.request.storage_conn.get_meter_statistics(
f, period, g, aggregate)
return [Statistics(start_timestamp=start,
end_timestamp=end,
**c.as_dict())
for c in computed]
except OverflowError as e:
params = dict(period=period, err=e)
raise base.ClientSideError(
_("Invalid period %(period)s: %(err)s") % params)
class Meter(base.Base):
"""One category of measurements."""
name = wtypes.text
"The unique name for the meter"
type = wtypes.Enum(str, *sample.TYPES)
"The meter type (see :ref:`measurements`)"
unit = wtypes.text
"The unit of measure"
resource_id = wtypes.text
"The ID of the :class:`Resource` for which the measurements are taken"
project_id = wtypes.text
"The ID of the project or tenant that owns the resource"
user_id = wtypes.text
"The ID of the user who last triggered an update to the resource"
source = wtypes.text
"The ID of the source that identifies where the meter comes from"
meter_id = wtypes.text
"The unique identifier for the meter"
def __init__(self, **kwargs):
meter_id = '%s+%s' % (kwargs['resource_id'], kwargs['name'])
# meter_id is of type Unicode but base64.encodestring() only accepts
# strings. See bug #1333177
meter_id = base64.encodestring(meter_id.encode('utf-8'))
kwargs['meter_id'] = meter_id
super(Meter, self).__init__(**kwargs)
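# Illustrative encoding (assumed values): resource_id 'res-1' and name
# 'instance' combine to 'res-1+instance', which base64.encodestring()
# turns into 'cmVzLTEraW5zdGFuY2U=\n' to form a transport-safe meter_id.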
@classmethod
def sample(cls):
return cls(name='instance',
type='gauge',
unit='instance',
resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
source='openstack',
)
class MetersController(rest.RestController):
"""Works on meters."""
@pecan.expose()
def _lookup(self, meter_name, *remainder):
return MeterController(meter_name), remainder
@wsme_pecan.wsexpose([Meter], [base.Query])
def get_all(self, q=None):
"""Return all known meters, based on the data recorded so far.
:param q: Filter rules for the meters to be returned.
"""
rbac.enforce('get_meters', pecan.request)
q = q or []
# Timestamp field is not supported for Meter queries
kwargs = v2_utils.query_to_kwargs(
q, pecan.request.storage_conn.get_meters, allow_timestamps=False)
return [Meter.from_db_model(m)
for m in pecan.request.storage_conn.get_meters(**kwargs)]


@ -31,10 +31,8 @@ import wsmeext.pecan as wsme_pecan
from ceilometer.alarm.storage import models as alarm_models
from ceilometer.api.controllers.v2 import alarms
from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import samples
from ceilometer.api import rbac
from ceilometer.i18n import _
from ceilometer import storage
from ceilometer import utils
LOG = log.getLogger(__name__)
@ -329,36 +327,6 @@ class ValidatedComplexQuery(object):
jsonschema.validate(orderby_expr, self.orderby_schema)
class QuerySamplesController(rest.RestController):
"""Provides complex query possibilities for samples."""
@wsme_pecan.wsexpose([samples.Sample], body=ComplexQuery)
def post(self, body):
"""Define query for retrieving Sample data.
:param body: Query rules for the samples to be returned.
"""
rbac.enforce('query_sample', pecan.request)
sample_name_mapping = {"resource": "resource_id",
"meter": "counter_name",
"type": "counter_type",
"unit": "counter_unit",
"volume": "counter_volume"}
query = ValidatedComplexQuery(body,
storage.models.Sample,
sample_name_mapping,
metadata_allowed=True)
query.validate(visibility_field="project_id")
conn = pecan.request.storage_conn
return [samples.Sample.from_db_model(s)
for s in conn.query_samples(query.filter_expr,
query.orderby,
query.limit)]
class QueryAlarmHistoryController(rest.RestController):
"""Provides complex query possibilities for alarm history."""
@wsme_pecan.wsexpose([alarms.AlarmChange], body=ComplexQuery)
@ -405,5 +373,4 @@ class QueryAlarmsController(rest.RestController):
class QueryController(rest.RestController):
samples = QuerySamplesController()
alarms = QueryAlarmsController()


@ -1,150 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import pecan
from pecan import rest
import six
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import utils
from ceilometer.api import rbac
from ceilometer.i18n import _
class Resource(base.Base):
"""An externally defined object for which samples have been received."""
resource_id = wtypes.text
"The unique identifier for the resource"
project_id = wtypes.text
"The ID of the owning project or tenant"
user_id = wtypes.text
"The ID of the user who created the resource or updated it last"
first_sample_timestamp = datetime.datetime
"UTC date & time not later than the first sample known for this resource"
last_sample_timestamp = datetime.datetime
"UTC date & time not earlier than the last sample known for this resource"
metadata = {wtypes.text: wtypes.text}
"Arbitrary metadata associated with the resource"
links = [base.Link]
"A list containing a self link and associated meter links"
source = wtypes.text
"The source where the resource come from"
def __init__(self, metadata=None, **kwds):
metadata = metadata or {}
metadata = utils.flatten_metadata(metadata)
super(Resource, self).__init__(metadata=metadata, **kwds)
@classmethod
def sample(cls):
return cls(
resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
timestamp=datetime.datetime.utcnow(),
source="openstack",
metadata={'name1': 'value1',
'name2': 'value2'},
links=[
base.Link(href=('http://localhost:8777/v2/resources/'
'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'),
rel='self'),
base.Link(href=('http://localhost:8777/v2/meters/volume?'
'q.field=resource_id&q.value='
'bd9431c1-8d69-4ad3-803a-8d4a6b89fd36'),
rel='volume')
],
)
class ResourcesController(rest.RestController):
"""Works on resources."""
@staticmethod
def _make_link(rel_name, url, type, type_arg, query=None):
query_str = ''
if query:
query_str = '?q.field=%s&q.value=%s' % (query['field'],
query['value'])
return base.Link(href='%s/v2/%s/%s%s' % (url, type,
type_arg, query_str),
rel=rel_name)
def _resource_links(self, resource_id, meter_links=1):
links = [self._make_link('self', pecan.request.host_url,
'resources', resource_id)]
if meter_links:
for meter in pecan.request.storage_conn.get_meters(
resource=resource_id):
query = {'field': 'resource_id', 'value': resource_id}
links.append(self._make_link(meter.name,
pecan.request.host_url,
'meters', meter.name,
query=query))
return links
@wsme_pecan.wsexpose(Resource, six.text_type)
def get_one(self, resource_id):
"""Retrieve details about one resource.
:param resource_id: The UUID of the resource.
"""
rbac.enforce('get_resource', pecan.request)
authorized_project = rbac.get_limited_to_project(pecan.request.headers)
resources = list(pecan.request.storage_conn.get_resources(
resource=resource_id, project=authorized_project))
if not resources:
raise base.EntityNotFound(_('Resource'), resource_id)
return Resource.from_db_and_links(resources[0],
self._resource_links(resource_id))
@wsme_pecan.wsexpose([Resource], [base.Query], int)
def get_all(self, q=None, meter_links=1):
"""Retrieve definitions of all of the resources.
:param q: Filter rules for the resources to be returned.
:param meter_links: option to include related meter links
"""
rbac.enforce('get_resources', pecan.request)
q = q or []
kwargs = utils.query_to_kwargs(
q, pecan.request.storage_conn.get_resources)
resources = [
Resource.from_db_and_links(r,
self._resource_links(r.resource_id,
meter_links))
for r in pecan.request.storage_conn.get_resources(**kwargs)]
return resources
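# A usage sketch for the listing endpoint above; the URL and token are
# assumptions for a default deployment, and the q.field/q.value pairs
# mirror base.Query.
import requests

params = {"q.field": "resource_id",
          "q.value": "bd9431c1-8d69-4ad3-803a-8d4a6b89fd36",
          "meter_links": 1}
resp = requests.get("http://localhost:8777/v2/resources",
                    params=params,
                    headers={"X-Auth-Token": "ADMIN_TOKEN"})
for r in resp.json():
    print(r["resource_id"], r["last_sample_timestamp"])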


@@ -20,21 +20,12 @@
from ceilometer.api.controllers.v2 import alarms
from ceilometer.api.controllers.v2 import capabilities
from ceilometer.api.controllers.v2 import events
from ceilometer.api.controllers.v2 import meters
from ceilometer.api.controllers.v2 import query
from ceilometer.api.controllers.v2 import resources
from ceilometer.api.controllers.v2 import samples
class V2Controller(object):
"""Version 2 API controller root."""
resources = resources.ResourcesController()
meters = meters.MetersController()
samples = samples.SamplesController()
alarms = alarms.AlarmsController()
event_types = events.EventTypesController()
events = events.EventsController()
query = query.QueryController()
capabilities = capabilities.CapabilitiesController()


@@ -1,147 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 IBM Corp.
# Copyright 2013 eNovance <licensing@enovance.com>
# Copyright Ericsson AB 2013. All rights reserved
# Copyright 2014 Hewlett-Packard Company
# Copyright 2015 Huawei Technologies Co., Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import uuid
from oslo_utils import timeutils
import pecan
from pecan import rest
from wsme import types as wtypes
import wsmeext.pecan as wsme_pecan
from ceilometer.api.controllers.v2 import base
from ceilometer.api.controllers.v2 import utils
from ceilometer.api import rbac
from ceilometer.i18n import _
from ceilometer import sample
from ceilometer import storage
class Sample(base.Base):
"""One measurement."""
id = wtypes.text
"The unique identifier for the sample."
meter = wtypes.text
"The meter name this sample is for."
type = wtypes.Enum(str, *sample.TYPES)
"The meter type (see :ref:`meter_types`)"
unit = wtypes.text
"The unit of measure."
volume = float
"The metered value."
user_id = wtypes.text
"The user this sample was taken for."
project_id = wtypes.text
"The project this sample was taken for."
resource_id = wtypes.text
"The :class:`Resource` this sample was taken for."
source = wtypes.text
"The source that identifies where the sample comes from."
timestamp = datetime.datetime
"When the sample has been generated."
recorded_at = datetime.datetime
"When the sample has been recorded."
metadata = {wtypes.text: wtypes.text}
"Arbitrary metadata associated with the sample."
@classmethod
def from_db_model(cls, m):
return cls(id=m.message_id,
meter=m.counter_name,
type=m.counter_type,
unit=m.counter_unit,
volume=m.counter_volume,
user_id=m.user_id,
project_id=m.project_id,
resource_id=m.resource_id,
source=m.source,
timestamp=m.timestamp,
recorded_at=m.recorded_at,
metadata=utils.flatten_metadata(m.resource_metadata))
@classmethod
def sample(cls):
return cls(id=str(uuid.uuid1()),
meter='instance',
type='gauge',
unit='instance',
volume=1,
resource_id='bd9431c1-8d69-4ad3-803a-8d4a6b89fd36',
project_id='35b17138-b364-4e6a-a131-8f3099c5be68',
user_id='efd87807-12d2-4b38-9c70-5f5c2ac427ff',
timestamp=timeutils.utcnow(),
recorded_at=datetime.datetime.utcnow(),
source='openstack',
metadata={'name1': 'value1',
'name2': 'value2'},
)
class SamplesController(rest.RestController):
"""Controller managing the samples."""
@wsme_pecan.wsexpose([Sample], [base.Query], int)
def get_all(self, q=None, limit=None):
"""Return all known samples, based on the data recorded so far.
:param q: Filter rules for the samples to be returned.
:param limit: Maximum number of samples to be returned.
"""
rbac.enforce('get_samples', pecan.request)
q = q or []
if limit and limit < 0:
raise base.ClientSideError(_("Limit must be positive"))
kwargs = utils.query_to_kwargs(q, storage.SampleFilter.__init__)
f = storage.SampleFilter(**kwargs)
return map(Sample.from_db_model,
pecan.request.storage_conn.get_samples(f, limit=limit))
@wsme_pecan.wsexpose(Sample, wtypes.text)
def get_one(self, sample_id):
"""Return a sample.
:param sample_id: the id of the sample.
"""
rbac.enforce('get_sample', pecan.request)
f = storage.SampleFilter(message_id=sample_id)
samples = list(pecan.request.storage_conn.get_samples(f))
if len(samples) < 1:
raise base.EntityNotFound(_('Sample'), sample_id)
return Sample.from_db_model(samples[0])
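# A minimal sketch of the storage-level calls the two methods above wrap,
# assuming a prepared configuration with a reachable metering backend.
from oslo_config import cfg

from ceilometer import service
from ceilometer import storage

service.prepare_service([])
conn = storage.get_connection_from_config(cfg.CONF, 'metering')
f = storage.SampleFilter(meter='instance')
for s in conn.get_samples(f, limit=5):
    print(s.message_id, s.counter_name, s.counter_volume)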


@@ -20,17 +20,14 @@
import copy
import datetime
import functools
import inspect
from oslo_utils import timeutils
import pecan
import six
import wsme
from ceilometer.api.controllers.v2 import base
from ceilometer.api import rbac
from ceilometer import utils
def get_auth_project(on_behalf_of=None):
@@ -292,46 +289,3 @@ def _get_query_timestamps(args=None):
'end_timestamp': end_timestamp,
'start_timestamp_op': args.get('start_timestamp_op'),
'end_timestamp_op': args.get('end_timestamp_op')}
def flatten_metadata(metadata):
"""Return flattened resource metadata.
Metadata is returned with flattened nested structures (except nested sets)
and with all values converted to unicode strings.
"""
if metadata:
# After changing recursive_keypairs' output we need to keep
# flattening output unchanged.
# Example: recursive_keypairs({'a': {'b':{'c':'d'}}}, '.')
# output before: a.b:c=d
# output now: a.b.c=d
# So to keep the first variant just replace all dots except the first
return dict((k.replace('.', ':').replace(':', '.', 1),
six.text_type(v))
for k, v in utils.recursive_keypairs(metadata,
separator='.')
if type(v) is not set)
return {}
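# A worked example of the replacement rule above: recursive_keypairs
# yields ('a.b.c', 'd'); all dots become colons and only the first colon
# is turned back into a dot.
flatten_metadata({'a': {'b': {'c': 'd'}}})
# -> {'a.b:c': u'd'}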
# TODO(fabiog): this decorator should disappear and have a more unified
# way of controlling access and scope. Before messing with this, though
# I feel this file should be re-factored in smaller chunks one for each
# controller (e.g. meters, alarms and so on ...). Right now its size is
# overwhelming.
def requires_admin(func):
@functools.wraps(func)
def wrapped(*args, **kwargs):
usr_limit, proj_limit = rbac.get_limited_to(pecan.request.headers)
# If User and Project are None, you have full access.
if usr_limit and proj_limit:
# since this decorator gets called outside of the wsme context,
# raising an exception results in an internal error, so call abort
# to handle the error
ex = base.ProjectNotAuthorized(proj_limit)
pecan.core.abort(status_code=ex.code, detail=ex.msg)
return func(*args, **kwargs)
return wrapped
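# A minimal usage sketch for the decorator above; the function is
# hypothetical. The wrapped call only proceeds when neither a user nor a
# project scope limit applies; otherwise the decorator aborts with
# ProjectNotAuthorized.
@requires_admin
def purge_resources():
    # Hypothetical admin-only action.
    return 'only reachable with an unrestricted token'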


@@ -16,16 +16,9 @@
import threading
from oslo_config import cfg
from oslo_log import log
from pecan import hooks
from ceilometer.i18n import _LE
from ceilometer import messaging
from ceilometer import storage
LOG = log.getLogger(__name__)
class ConfigHook(hooks.PecanHook):
"""Attach the configuration object to the request.
@@ -40,48 +33,12 @@ class ConfigHook(hooks.PecanHook):
class DBHook(hooks.PecanHook):
def __init__(self):
self.storage_connection = DBHook.get_connection('metering')
self.event_storage_connection = DBHook.get_connection('event')
self.alarm_storage_connection = DBHook.get_connection('alarm')
if (not self.storage_connection and
not self.event_storage_connection and
not self.alarm_storage_connection):
raise Exception("Api failed to start. Failed to connect to "
"databases, purpose: %s" %
', '.join(['metering', 'event', 'alarm']))
def __init__(self, alarm_conn):
self.alarm_storage_connection = alarm_conn
def before(self, state):
state.request.storage_conn = self.storage_connection
state.request.event_storage_conn = self.event_storage_connection
state.request.alarm_storage_conn = self.alarm_storage_connection
@staticmethod
def get_connection(purpose):
try:
return storage.get_connection_from_config(cfg.CONF, purpose)
except Exception as err:
params = {"purpose": purpose, "err": err}
LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s "
"retry later: %(err)s") % params)
class NotifierHook(hooks.PecanHook):
"""Create and attach a notifier to the request.
Usually, samples will be pushed to the notification bus by the notifier
when they are posted via the /v2/meters/ API.
"""
def __init__(self):
transport = messaging.get_transport()
self.notifier = messaging.get_notifier(transport,
publisher_id="ceilometer.api")
def before(self, state):
state.request.notifier = self.notifier
class TranslationHook(hooks.PecanHook):


@@ -1,29 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_service import service as os_service
from ceilometer import notification
from ceilometer import service
CONF = cfg.CONF
def main():
service.prepare_service()
os_service.launch(CONF, notification.NotificationService(),
workers=service.get_workers('notification')).wait()


@@ -1,29 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_service import service as os_service
from ceilometer import collector
from ceilometer import service
CONF = cfg.CONF
def main():
service.prepare_service()
os_service.launch(CONF, collector.CollectorService(),
workers=service.get_workers('collector')).wait()


@@ -1,94 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Copyright 2014 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from oslo_service import service as os_service
from ceilometer.agent import manager
from ceilometer.i18n import _LW
from ceilometer import service
LOG = log.getLogger(__name__)
CONF = cfg.CONF
class MultiChoicesOpt(cfg.Opt):
def __init__(self, name, choices=None, **kwargs):
super(MultiChoicesOpt, self).__init__(name,
type=DeduplicatedCfgList(),
**kwargs)
self.choices = choices
def _get_argparse_kwargs(self, group, **kwargs):
"""Extends the base argparse keyword dict for multi choices options."""
kwargs = super(MultiChoicesOpt, self)._get_argparse_kwargs(group)
kwargs['nargs'] = '+'
choices = kwargs.get('choices', self.choices)
if choices:
kwargs['choices'] = choices
return kwargs
class DeduplicatedCfgList(cfg.types.List):
def __call__(self, *args, **kwargs):
result = super(DeduplicatedCfgList, self).__call__(*args, **kwargs)
if len(result) != len(set(result)):
LOG.warning(_LW("Duplicated values: %s found in CLI options, "
"auto de-duplidated"), result)
result = list(set(result))
return result
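# Behavior sketch for the de-duplicating type above: cfg.types.List
# splits on commas, and the ordering after de-duplication is not
# guaranteed because set() is used.
dedup = DeduplicatedCfgList()
dedup('compute,central,compute')
# logs the warning and returns e.g. ['compute', 'central']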
CLI_OPTS = [
MultiChoicesOpt('polling-namespaces',
default=['compute', 'central'],
choices=['compute', 'central', 'ipmi'],
dest='polling_namespaces',
help='Polling namespace(s) to be used while '
'polling resources'),
MultiChoicesOpt('pollster-list',
default=[],
dest='pollster_list',
help='List of pollsters (or wildcard templates) to be '
'used while polling'),
]
CONF.register_cli_opts(CLI_OPTS)
def main():
service.prepare_service()
os_service.launch(CONF, manager.AgentManager(CONF.polling_namespaces,
CONF.pollster_list)).wait()
# todo(dbelova): remove it someday. Needed for backward compatibility
def main_compute():
service.prepare_service()
os_service.launch(CONF, manager.AgentManager(['compute'])).wait()
# todo(dbelova): remove it someday. Needed for backward compatibility
def main_central():
service.prepare_service()
os_service.launch(CONF, manager.AgentManager(['central'])).wait()
def main_ipmi():
service.prepare_service()
os_service.launch(CONF, manager.AgentManager(['ipmi'])).wait()
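# Invocation sketch (the console-script name is an assumption from the
# project's setup.cfg entry points):
#   ceilometer-polling --polling-namespaces compute central \
#       --pollster-list image 'image.size'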


@@ -1,95 +0,0 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2012-2014 Julien Danjou
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Command line tool for creating meter for Ceilometer.
"""
import logging
import sys
from oslo_config import cfg
from oslo_context import context
from oslo_utils import timeutils
from stevedore import extension
from ceilometer import pipeline
from ceilometer import sample
from ceilometer import service
def send_sample():
cfg.CONF.register_cli_opts([
cfg.StrOpt('sample-name',
short='n',
help='Meter name.',
required=True),
cfg.StrOpt('sample-type',
short='y',
help='Meter type (gauge, delta, cumulative).',
default='gauge',
required=True),
cfg.StrOpt('sample-unit',
short='U',
help='Meter unit.'),
cfg.IntOpt('sample-volume',
short='l',
help='Meter volume value.',
default=1),
cfg.StrOpt('sample-resource',
short='r',
help='Meter resource id.',
required=True),
cfg.StrOpt('sample-user',
short='u',
help='Meter user id.'),
cfg.StrOpt('sample-project',
short='p',
help='Meter project id.'),
cfg.StrOpt('sample-timestamp',
short='i',
help='Meter timestamp.',
default=timeutils.utcnow().isoformat()),
cfg.StrOpt('sample-metadata',
short='m',
help='Meter metadata.'),
])
service.prepare_service()
# Set up logging to use the console
console = logging.StreamHandler(sys.stderr)
console.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(message)s')
console.setFormatter(formatter)
root_logger = logging.getLogger('')
root_logger.addHandler(console)
root_logger.setLevel(logging.DEBUG)
pipeline_manager = pipeline.setup_pipeline(
extension.ExtensionManager('ceilometer.transformer'))
with pipeline_manager.publisher(context.get_admin_context()) as p:
p([sample.Sample(
name=cfg.CONF.sample_name,
type=cfg.CONF.sample_type,
unit=cfg.CONF.sample_unit,
volume=cfg.CONF.sample_volume,
user_id=cfg.CONF.sample_user,
project_id=cfg.CONF.sample_project,
resource_id=cfg.CONF.sample_resource,
timestamp=cfg.CONF.sample_timestamp,
resource_metadata=cfg.CONF.sample_metadata and eval(
cfg.CONF.sample_metadata))])
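# Usage sketch (the console-script name is an assumption; note that
# --sample-metadata is eval()'d above, so it must be a Python dict
# literal):
#   ceilometer-send-sample -n test.meter -y gauge -l 42 \
#       -r fake-resource-id -m "{'origin': 'manual-test'}"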


@@ -18,7 +18,7 @@ import logging
from oslo_config import cfg
from ceilometer.i18n import _, _LI
from ceilometer.i18n import _LI
from ceilometer import service
from ceilometer import storage
@@ -28,32 +28,12 @@ LOG = logging.getLogger(__name__)
def dbsync():
service.prepare_service()
storage.get_connection_from_config(cfg.CONF, 'metering').upgrade()
storage.get_connection_from_config(cfg.CONF, 'alarm').upgrade()
storage.get_connection_from_config(cfg.CONF, 'event').upgrade()
def expirer():
service.prepare_service()
if cfg.CONF.database.metering_time_to_live > 0:
LOG.debug(_("Clearing expired metering data"))
storage_conn = storage.get_connection_from_config(cfg.CONF, 'metering')
storage_conn.clear_expired_metering_data(
cfg.CONF.database.metering_time_to_live)
else:
LOG.info(_LI("Nothing to clean, database metering time to live "
"is disabled"))
if cfg.CONF.database.event_time_to_live > 0:
LOG.debug(_("Clearing expired event data"))
event_conn = storage.get_connection_from_config(cfg.CONF, 'event')
event_conn.clear_expired_event_data(
cfg.CONF.database.event_time_to_live)
else:
LOG.info(_LI("Nothing to clean, database event time to live "
"is disabled"))
if cfg.CONF.database.alarm_history_time_to_live > 0:
LOG.debug("Clearing expired alarm history data")
storage_conn = storage.get_connection_from_config(cfg.CONF, 'alarm')


@@ -1,191 +0,0 @@
#
# Copyright 2012-2013 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import socket
import msgpack
from oslo_config import cfg
from oslo_log import log
import oslo_messaging
from oslo_service import service as os_service
from oslo_utils import netutils
from oslo_utils import units
from ceilometer import dispatcher
from ceilometer import messaging
from ceilometer.i18n import _, _LE
from ceilometer import utils
OPTS = [
cfg.StrOpt('udp_address',
default='0.0.0.0',
help='Address to which the UDP socket is bound. Set to '
'an empty string to disable.'),
cfg.IntOpt('udp_port',
default=4952,
help='Port to which the UDP socket is bound.'),
cfg.BoolOpt('requeue_sample_on_dispatcher_error',
default=False,
help='Requeue the sample on the collector sample queue '
'when the collector fails to dispatch it. This is only valid '
'if the sample comes from the notifier publisher.'),
cfg.BoolOpt('requeue_event_on_dispatcher_error',
default=False,
help='Requeue the event on the collector event queue '
'when the collector fails to dispatch it.'),
]
cfg.CONF.register_opts(OPTS, group="collector")
cfg.CONF.import_opt('metering_topic', 'ceilometer.publisher.messaging',
group='publisher_rpc')
cfg.CONF.import_opt('metering_topic', 'ceilometer.publisher.messaging',
group='publisher_notifier')
cfg.CONF.import_opt('event_topic', 'ceilometer.publisher.messaging',
group='publisher_notifier')
cfg.CONF.import_opt('store_events', 'ceilometer.notification',
group='notification')
LOG = log.getLogger(__name__)
class CollectorService(os_service.Service):
"""Listener for the collector service."""
def start(self):
"""Bind the UDP socket and handle incoming data."""
# ensure dispatcher is configured before starting other services
self.dispatcher_manager = dispatcher.load_dispatcher_manager()
self.rpc_server = None
self.sample_listener = None
self.event_listener = None
super(CollectorService, self).start()
if cfg.CONF.collector.udp_address:
self.tg.add_thread(self.start_udp)
transport = messaging.get_transport(optional=True)
if transport:
self.rpc_server = messaging.get_rpc_server(
transport, cfg.CONF.publisher_rpc.metering_topic, self)
sample_target = oslo_messaging.Target(
topic=cfg.CONF.publisher_notifier.metering_topic)
self.sample_listener = messaging.get_notification_listener(
transport, [sample_target],
[SampleEndpoint(self.dispatcher_manager)],
allow_requeue=(cfg.CONF.collector.
requeue_sample_on_dispatcher_error))
if cfg.CONF.notification.store_events:
event_target = oslo_messaging.Target(
topic=cfg.CONF.publisher_notifier.event_topic)
self.event_listener = messaging.get_notification_listener(
transport, [event_target],
[EventEndpoint(self.dispatcher_manager)],
allow_requeue=(cfg.CONF.collector.
requeue_event_on_dispatcher_error))
self.event_listener.start()
self.rpc_server.start()
self.sample_listener.start()
if not cfg.CONF.collector.udp_address:
# Add a dummy thread to have wait() working
self.tg.add_timer(604800, lambda: None)
def start_udp(self):
address_family = socket.AF_INET
if netutils.is_valid_ipv6(cfg.CONF.collector.udp_address):
address_family = socket.AF_INET6
udp = socket.socket(address_family, socket.SOCK_DGRAM)
udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
udp.bind((cfg.CONF.collector.udp_address,
cfg.CONF.collector.udp_port))
self.udp_run = True
while self.udp_run:
# NOTE(jd) Arbitrary limit of 64K because that ought to be
# enough for anybody.
data, source = udp.recvfrom(64 * units.Ki)
try:
sample = msgpack.loads(data, encoding='utf-8')
except Exception:
LOG.warn(_("UDP: Cannot decode data sent by %s"), source)
else:
try:
LOG.debug(_("UDP: Storing %s"), sample)
self.dispatcher_manager.map_method('record_metering_data',
sample)
except Exception:
LOG.exception(_("UDP: Unable to store meter"))
def stop(self):
self.udp_run = False
if self.rpc_server:
self.rpc_server.stop()
if self.sample_listener:
utils.kill_listeners([self.sample_listener])
if self.event_listener:
utils.kill_listeners([self.event_listener])
super(CollectorService, self).stop()
def record_metering_data(self, context, data):
"""RPC endpoint for messages we send to ourselves.
When the notification messages are re-published through the
RPC publisher, this method receives them for processing.
"""
self.dispatcher_manager.map_method('record_metering_data', data=data)
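# For context, the UDP path above expects a msgpack-encoded sample dict;
# a minimal compatible sender sketch follows. The address and port are
# the defaults registered above, and the payload keys are an assumption
# (anything the configured dispatchers accept).
import socket

import msgpack

data = msgpack.dumps({'counter_name': 'cpu_util',
                      'counter_type': 'gauge',
                      'counter_volume': 0.5,
                      'resource_id': 'fake-resource'})
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto(data, ('127.0.0.1', 4952))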
class CollectorEndpoint(object):
def __init__(self, dispatcher_manager, requeue_on_error):
self.dispatcher_manager = dispatcher_manager
self.requeue_on_error = requeue_on_error
def sample(self, ctxt, publisher_id, event_type, payload, metadata):
"""RPC endpoint for notification messages
When another service sends a notification over the message
bus, this method receives it.
"""
try:
self.dispatcher_manager.map_method(self.method, payload)
except Exception:
if self.requeue_on_error:
LOG.exception(_LE("Dispatcher failed to handle the %s, "
"requeue it."), self.ep_type)
return oslo_messaging.NotificationResult.REQUEUE
raise
class SampleEndpoint(CollectorEndpoint):
method = 'record_metering_data'
ep_type = 'sample'
def __init__(self, dispatcher_manager):
super(SampleEndpoint, self).__init__(
dispatcher_manager,
cfg.CONF.collector.requeue_sample_on_dispatcher_error)
class EventEndpoint(CollectorEndpoint):
method = 'record_events'
ep_type = 'event'
def __init__(self, dispatcher_manager):
super(EventEndpoint, self).__init__(
dispatcher_manager,
cfg.CONF.collector.requeue_event_on_dispatcher_error)


@@ -1,46 +0,0 @@
#
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from ceilometer.agent import plugin_base
from ceilometer import nova_client
OPTS = [
cfg.BoolOpt('workload_partitioning',
default=False,
help='Enable work-load partitioning, allowing multiple '
'compute agents to be run simultaneously.')
]
cfg.CONF.register_opts(OPTS, group='compute')
class InstanceDiscovery(plugin_base.DiscoveryBase):
def __init__(self):
super(InstanceDiscovery, self).__init__()
self.nova_cli = nova_client.Client()
def discover(self, manager, param=None):
"""Discover resources to monitor."""
instances = self.nova_cli.instance_get_all_by_host(cfg.CONF.host)
return [i for i in instances
if getattr(i, 'OS-EXT-STS:vm_state', None) != 'error']
@property
def group_id(self):
if cfg.CONF.compute.workload_partitioning:
return cfg.CONF.host
else:
return None


@@ -1,42 +0,0 @@
#
# Copyright 2013 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import oslo_messaging
from ceilometer.agent import plugin_base
OPTS = [
cfg.StrOpt('nova_control_exchange',
default='nova',
help="Exchange name for Nova notifications."),
]
cfg.CONF.register_opts(OPTS)
class ComputeNotificationBase(plugin_base.NotificationBase):
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo_messaging.Target
This sequence is defining the exchange and topics to be connected for
this plugin.
"""
return [oslo_messaging.Target(topic=topic,
exchange=conf.nova_control_exchange)
for topic in conf.notification_topics]


@@ -1,136 +0,0 @@
#
# Copyright 2013 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Converters for producing compute CPU sample messages from notification
events.
"""
from oslo_log import log
from oslo_utils import timeutils
from ceilometer.compute import notifications
from ceilometer.i18n import _
from ceilometer import sample
LOG = log.getLogger(__name__)
class ComputeMetricsNotificationBase(notifications.ComputeNotificationBase):
"""Convert compute.metrics.update notifications into Samples."""
event_types = ['compute.metrics.update']
metric = None
sample_type = None
unit = None
@staticmethod
def _get_sample(message, name):
try:
for metric in message['payload']['metrics']:
if name == metric['name']:
info = {'payload': metric,
'event_type': message['event_type'],
'publisher_id': message['publisher_id'],
'resource_id': '%s_%s' % (
message['payload']['host'],
message['payload']['nodename']),
'timestamp': str(timeutils.parse_strtime(
metric['timestamp']))}
return info
except Exception as err:
LOG.warning(_('An error occurred while building %(m)s '
'sample: %(e)s') % {'m': name, 'e': err})
def process_notification(self, message):
info = self._get_sample(message, self.metric)
if info:
yield sample.Sample.from_notification(
name='compute.node.%s' % self.metric,
type=self.sample_type,
unit=self.unit,
volume=(info['payload']['value'] * 100 if self.unit == '%'
else info['payload']['value']),
user_id=None,
project_id=None,
resource_id=info['resource_id'],
message=info)
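# For reference, a minimal notification that process_notification() above
# can digest; all values are illustrative.
message = {
    'event_type': 'compute.metrics.update',
    'publisher_id': 'compute.node1',
    'payload': {
        'host': 'node1',
        'nodename': 'node1.example.com',
        'metrics': [{'name': 'cpu.frequency',
                     'value': 1600,
                     'timestamp': '2015-05-25T07:26:37.000000'}],
    },
}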
class CpuFrequency(ComputeMetricsNotificationBase):
"""Handle CPU current frequency message."""
metric = 'cpu.frequency'
sample_type = sample.TYPE_GAUGE
unit = 'MHz'
class CpuUserTime(ComputeMetricsNotificationBase):
"""Handle CPU user mode time message."""
metric = 'cpu.user.time'
sample_type = sample.TYPE_CUMULATIVE
unit = 'ns'
class CpuKernelTime(ComputeMetricsNotificationBase):
"""Handle CPU kernel time message."""
metric = 'cpu.kernel.time'
unit = 'ns'
sample_type = sample.TYPE_CUMULATIVE
class CpuIdleTime(ComputeMetricsNotificationBase):
"""Handle CPU idle time message."""
metric = 'cpu.idle.time'
unit = 'ns'
sample_type = sample.TYPE_CUMULATIVE
class CpuIowaitTime(ComputeMetricsNotificationBase):
"""Handle CPU I/O wait time message."""
metric = 'cpu.iowait.time'
unit = 'ns'
sample_type = sample.TYPE_CUMULATIVE
class CpuKernelPercent(ComputeMetricsNotificationBase):
"""Handle CPU kernel percentage message."""
metric = 'cpu.kernel.percent'
unit = '%'
sample_type = sample.TYPE_GAUGE
class CpuIdlePercent(ComputeMetricsNotificationBase):
"""Handle CPU idle percentage message."""
metric = 'cpu.idle.percent'
unit = '%'
sample_type = sample.TYPE_GAUGE
class CpuUserPercent(ComputeMetricsNotificationBase):
"""Handle CPU user mode percentage message."""
metric = 'cpu.user.percent'
unit = '%'
sample_type = sample.TYPE_GAUGE
class CpuIowaitPercent(ComputeMetricsNotificationBase):
"""Handle CPU I/O wait percentage message."""
metric = 'cpu.iowait.percent'
unit = '%'
sample_type = sample.TYPE_GAUGE
class CpuPercent(ComputeMetricsNotificationBase):
"""Handle generic CPU utilization message."""
metric = 'cpu.percent'
unit = '%'
sample_type = sample.TYPE_GAUGE


@@ -1,178 +0,0 @@
#
# Copyright 2012 New Dream Network, LLC (DreamHost)
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Converters for producing compute sample messages from notification events.
"""
import abc
import six
from ceilometer.agent import plugin_base
from ceilometer.compute import notifications
from ceilometer.compute import util
from ceilometer import sample
@six.add_metaclass(abc.ABCMeta)
class UserMetadataAwareInstanceNotificationBase(
notifications.ComputeNotificationBase):
"""Consumes notifications containing instance user metadata."""
def process_notification(self, message):
instance_properties = self.get_instance_properties(message)
if isinstance(instance_properties.get('metadata'), dict):
src_metadata = instance_properties['metadata']
del instance_properties['metadata']
util.add_reserved_user_metadata(src_metadata, instance_properties)
return self.get_sample(message)
def get_instance_properties(self, message):
"""Retrieve instance properties from notification payload."""
return message['payload']
@abc.abstractmethod
def get_sample(self, message):
"""Derive sample from notification payload."""
class InstanceScheduled(UserMetadataAwareInstanceNotificationBase,
plugin_base.NonMetricNotificationBase):
event_types = ['scheduler.run_instance.scheduled']
def get_instance_properties(self, message):
"""Retrieve instance properties from notification payload."""
return message['payload']['request_spec']['instance_properties']
def get_sample(self, message):
yield sample.Sample.from_notification(
name='instance.scheduled',
type=sample.TYPE_DELTA,
volume=1,
unit='instance',
user_id=None,
project_id=message['payload']['request_spec']
['instance_properties']['project_id'],
resource_id=message['payload']['instance_id'],
message=message)
class ComputeInstanceNotificationBase(
UserMetadataAwareInstanceNotificationBase):
"""Convert compute.instance.* notifications into Samples."""
event_types = ['compute.instance.*']
class Instance(ComputeInstanceNotificationBase,
plugin_base.NonMetricNotificationBase):
def get_sample(self, message):
yield sample.Sample.from_notification(
name='instance',
type=sample.TYPE_GAUGE,
unit='instance',
volume=1,
user_id=message['payload']['user_id'],
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['instance_id'],
message=message)
class Memory(ComputeInstanceNotificationBase):
def get_sample(self, message):
yield sample.Sample.from_notification(
name='memory',
type=sample.TYPE_GAUGE,
unit='MB',
volume=message['payload']['memory_mb'],
user_id=message['payload']['user_id'],
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['instance_id'],
message=message)
class VCpus(ComputeInstanceNotificationBase):
def get_sample(self, message):
yield sample.Sample.from_notification(
name='vcpus',
type=sample.TYPE_GAUGE,
unit='vcpu',
volume=message['payload']['vcpus'],
user_id=message['payload']['user_id'],
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['instance_id'],
message=message)
class RootDiskSize(ComputeInstanceNotificationBase):
def get_sample(self, message):
yield sample.Sample.from_notification(
name='disk.root.size',
type=sample.TYPE_GAUGE,
unit='GB',
volume=message['payload']['root_gb'],
user_id=message['payload']['user_id'],
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['instance_id'],
message=message)
class EphemeralDiskSize(ComputeInstanceNotificationBase):
def get_sample(self, message):
yield sample.Sample.from_notification(
name='disk.ephemeral.size',
type=sample.TYPE_GAUGE,
unit='GB',
volume=message['payload']['ephemeral_gb'],
user_id=message['payload']['user_id'],
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['instance_id'],
message=message)
class InstanceFlavor(ComputeInstanceNotificationBase,
plugin_base.NonMetricNotificationBase):
def get_sample(self, message):
instance_type = message.get('payload', {}).get('instance_type')
if instance_type:
yield sample.Sample.from_notification(
name='instance:%s' % instance_type,
type=sample.TYPE_GAUGE,
unit='instance',
volume=1,
user_id=message['payload']['user_id'],
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['instance_id'],
message=message)
class InstanceDelete(ComputeInstanceNotificationBase):
"""Handle the messages sent by the nova notifier plugin.
Messages are sent when an instance is being deleted.
"""
event_types = ['compute.instance.delete.samples']
def get_sample(self, message):
for s in message['payload'].get('samples', []):
yield sample.Sample.from_notification(
name=s['name'],
type=s['type'],
unit=s['unit'],
volume=s['volume'],
user_id=message['payload']['user_id'],
project_id=message['payload']['tenant_id'],
resource_id=message['payload']['instance_id'],
message=message)


@@ -1,51 +0,0 @@
# Copyright 2014 Mirantis, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_utils import timeutils
import six
from ceilometer.agent import plugin_base
from ceilometer.compute.virt import inspector as virt_inspector
@six.add_metaclass(abc.ABCMeta)
class BaseComputePollster(plugin_base.PollsterBase):
@property
def inspector(self):
try:
inspector = self._inspector
except AttributeError:
inspector = virt_inspector.get_hypervisor_inspector()
BaseComputePollster._inspector = inspector
return inspector
@property
def default_discovery(self):
return 'local_instances'
def _record_poll_time(self):
"""Method records current time as the poll time.
:return: time in seconds since the last poll time was recorded
"""
current_time = timeutils.utcnow()
duration = None
if hasattr(self, '_last_poll_time'):
duration = timeutils.delta_seconds(self._last_poll_time,
current_time)
self._last_poll_time = current_time
return duration
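# Behavior sketch for _record_poll_time(); the concrete pollster class is
# hypothetical.
p = SomeComputePollster()  # any BaseComputePollster subclass
p._record_poll_time()      # first call: no previous poll, returns None
# ...one polling interval later...
p._record_poll_time()      # returns the elapsed seconds as a float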


@@ -1,88 +0,0 @@
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import ceilometer
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _
from ceilometer import sample
LOG = log.getLogger(__name__)
class CPUPollster(pollsters.BaseComputePollster):
def get_samples(self, manager, cache, resources):
for instance in resources:
LOG.debug(_('checking instance %s'), instance.id)
try:
cpu_info = self.inspector.inspect_cpus(instance)
LOG.debug(_("CPUTIME USAGE: %(instance)s %(time)d"),
{'instance': instance.__dict__,
'time': cpu_info.time})
cpu_num = {'cpu_number': cpu_info.number}
yield util.make_sample_from_instance(
instance,
name='cpu',
type=sample.TYPE_CUMULATIVE,
unit='ns',
volume=cpu_info.time,
additional_metadata=cpu_num,
)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('Obtaining CPU time is not implemented for %s'
), self.inspector.__class__.__name__)
except Exception as err:
LOG.exception(_('could not get CPU time for %(id)s: %(e)s'),
{'id': instance.id, 'e': err})
class CPUUtilPollster(pollsters.BaseComputePollster):
def get_samples(self, manager, cache, resources):
self._inspection_duration = self._record_poll_time()
for instance in resources:
LOG.debug(_('Checking CPU util for instance %s'), instance.id)
try:
cpu_info = self.inspector.inspect_cpu_util(
instance, self._inspection_duration)
LOG.debug(_("CPU UTIL: %(instance)s %(util)d"),
({'instance': instance.__dict__,
'util': cpu_info.util}))
yield util.make_sample_from_instance(
instance,
name='cpu_util',
type=sample.TYPE_GAUGE,
unit='%',
volume=cpu_info.util,
)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('Obtaining CPU Util is not implemented for %s'),
self.inspector.__class__.__name__)
except Exception as err:
LOG.exception(_('Could not get CPU Util for %(id)s: %(e)s'),
{'id': instance.id, 'e': err})


@@ -1,813 +0,0 @@
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
# Copyright 2014 Cisco Systems, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import collections
from oslo_log import log
import six
import ceilometer
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _, _LW
from ceilometer import sample
LOG = log.getLogger(__name__)
DiskIOData = collections.namedtuple(
'DiskIOData',
'r_bytes r_requests w_bytes w_requests per_disk_requests',
)
DiskRateData = collections.namedtuple('DiskRateData',
['read_bytes_rate',
'read_requests_rate',
'write_bytes_rate',
'write_requests_rate',
'per_disk_rate'])
DiskLatencyData = collections.namedtuple('DiskLatencyData',
['disk_latency',
'per_disk_latency'])
DiskIOPSData = collections.namedtuple('DiskIOPSData',
['iops_count',
'per_disk_iops'])
DiskInfoData = collections.namedtuple('DiskInfoData',
['capacity',
'allocation',
'physical',
'per_disk_info'])
@six.add_metaclass(abc.ABCMeta)
class _Base(pollsters.BaseComputePollster):
DISKIO_USAGE_MESSAGE = ' '.join(["DISKIO USAGE:",
"%s %s:",
"read-requests=%d",
"read-bytes=%d",
"write-requests=%d",
"write-bytes=%d",
"errors=%d",
])
CACHE_KEY_DISK = 'diskio'
def _populate_cache(self, inspector, cache, instance):
i_cache = cache.setdefault(self.CACHE_KEY_DISK, {})
if instance.id not in i_cache:
r_bytes = 0
r_requests = 0
w_bytes = 0
w_requests = 0
per_device_read_bytes = {}
per_device_read_requests = {}
per_device_write_bytes = {}
per_device_write_requests = {}
for disk, info in inspector.inspect_disks(instance):
LOG.debug(self.DISKIO_USAGE_MESSAGE,
instance, disk.device, info.read_requests,
info.read_bytes, info.write_requests,
info.write_bytes, info.errors)
r_bytes += info.read_bytes
r_requests += info.read_requests
w_bytes += info.write_bytes
w_requests += info.write_requests
# per disk data
per_device_read_bytes[disk.device] = info.read_bytes
per_device_read_requests[disk.device] = info.read_requests
per_device_write_bytes[disk.device] = info.write_bytes
per_device_write_requests[disk.device] = info.write_requests
per_device_requests = {
'read_bytes': per_device_read_bytes,
'read_requests': per_device_read_requests,
'write_bytes': per_device_write_bytes,
'write_requests': per_device_write_requests,
}
i_cache[instance.id] = DiskIOData(
r_bytes=r_bytes,
r_requests=r_requests,
w_bytes=w_bytes,
w_requests=w_requests,
per_disk_requests=per_device_requests,
)
return i_cache[instance.id]
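# Shape sketch of the per-instance cache entry built above; device names
# and values are illustrative.
cache = {'diskio': {
    'instance-uuid': DiskIOData(
        r_bytes=4096, r_requests=2, w_bytes=8192, w_requests=4,
        per_disk_requests={'read_bytes': {'vda': 4096},
                           'read_requests': {'vda': 2},
                           'write_bytes': {'vda': 8192},
                           'write_requests': {'vda': 4}}),
}}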
@abc.abstractmethod
def _get_samples(instance, c_data):
"""Return one or more Sample."""
def get_samples(self, manager, cache, resources):
for instance in resources:
instance_name = util.instance_name(instance)
try:
c_data = self._populate_cache(
self.inspector,
cache,
instance,
)
for s in self._get_samples(instance, c_data):
yield s
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except virt_inspector.InstanceShutOffException as e:
LOG.warn(_LW('Instance %(instance_id)s was shut off while '
'getting samples of %(pollster)s: %(exc)s'),
{'instance_id': instance.id,
'pollster': self.__class__.__name__, 'exc': e})
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('%(inspector)s does not provide data for '
'%(pollster)s'),
{'inspector': self.inspector.__class__.__name__,
'pollster': self.__class__.__name__})
except Exception as err:
LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
{'name': instance_name, 'error': err})
class ReadRequestsPollster(_Base):
@staticmethod
def _get_samples(instance, c_data):
return [util.make_sample_from_instance(
instance,
name='disk.read.requests',
type=sample.TYPE_CUMULATIVE,
unit='request',
volume=c_data.r_requests,
additional_metadata={
'device': c_data.per_disk_requests['read_requests'].keys()}
)]
class PerDeviceReadRequestsPollster(_Base):
@staticmethod
def _get_samples(instance, c_data):
samples = []
for disk, value in six.iteritems(c_data.per_disk_requests[
'read_requests']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.read.requests',
type=sample.TYPE_CUMULATIVE,
unit='request',
volume=value,
resource_id="%s-%s" % (instance.id, disk),
))
return samples
class ReadBytesPollster(_Base):
@staticmethod
def _get_samples(instance, c_data):
return [util.make_sample_from_instance(
instance,
name='disk.read.bytes',
type=sample.TYPE_CUMULATIVE,
unit='B',
volume=c_data.r_bytes,
additional_metadata={
'device': c_data.per_disk_requests['read_bytes'].keys()},
)]
class PerDeviceReadBytesPollster(_Base):
@staticmethod
def _get_samples(instance, c_data):
samples = []
for disk, value in six.iteritems(c_data.per_disk_requests[
'read_bytes']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.read.bytes',
type=sample.TYPE_CUMULATIVE,
unit='B',
volume=value,
resource_id="%s-%s" % (instance.id, disk),
))
return samples
class WriteRequestsPollster(_Base):
@staticmethod
def _get_samples(instance, c_data):
return [util.make_sample_from_instance(
instance,
name='disk.write.requests',
type=sample.TYPE_CUMULATIVE,
unit='request',
volume=c_data.w_requests,
additional_metadata={
'device': c_data.per_disk_requests['write_requests'].keys()},
)]
class PerDeviceWriteRequestsPollster(_Base):
@staticmethod
def _get_samples(instance, c_data):
samples = []
for disk, value in six.iteritems(c_data.per_disk_requests[
'write_requests']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.write.requests',
type=sample.TYPE_CUMULATIVE,
unit='request',
volume=value,
resource_id="%s-%s" % (instance.id, disk),
))
return samples
class WriteBytesPollster(_Base):
@staticmethod
def _get_samples(instance, c_data):
return [util.make_sample_from_instance(
instance,
name='disk.write.bytes',
type=sample.TYPE_CUMULATIVE,
unit='B',
volume=c_data.w_bytes,
additional_metadata={
'device': c_data.per_disk_requests['write_bytes'].keys()},
)]
class PerDeviceWriteBytesPollster(_Base):
@staticmethod
def _get_samples(instance, c_data):
samples = []
for disk, value in six.iteritems(c_data.per_disk_requests[
'write_bytes']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.write.bytes',
type=sample.TYPE_CUMULATIVE,
unit='B',
volume=value,
resource_id="%s-%s" % (instance.id, disk),
))
return samples
@six.add_metaclass(abc.ABCMeta)
class _DiskRatesPollsterBase(pollsters.BaseComputePollster):
CACHE_KEY_DISK_RATE = 'diskio-rate'
def _populate_cache(self, inspector, cache, instance):
i_cache = cache.setdefault(self.CACHE_KEY_DISK_RATE, {})
if instance.id not in i_cache:
r_bytes_rate = 0
r_requests_rate = 0
w_bytes_rate = 0
w_requests_rate = 0
per_disk_r_bytes_rate = {}
per_disk_r_requests_rate = {}
per_disk_w_bytes_rate = {}
per_disk_w_requests_rate = {}
disk_rates = inspector.inspect_disk_rates(
instance, self._inspection_duration)
for disk, info in disk_rates:
r_bytes_rate += info.read_bytes_rate
r_requests_rate += info.read_requests_rate
w_bytes_rate += info.write_bytes_rate
w_requests_rate += info.write_requests_rate
per_disk_r_bytes_rate[disk.device] = info.read_bytes_rate
per_disk_r_requests_rate[disk.device] = info.read_requests_rate
per_disk_w_bytes_rate[disk.device] = info.write_bytes_rate
per_disk_w_requests_rate[disk.device] = (
info.write_requests_rate)
per_disk_rate = {
'read_bytes_rate': per_disk_r_bytes_rate,
'read_requests_rate': per_disk_r_requests_rate,
'write_bytes_rate': per_disk_w_bytes_rate,
'write_requests_rate': per_disk_w_requests_rate,
}
i_cache[instance.id] = DiskRateData(
r_bytes_rate,
r_requests_rate,
w_bytes_rate,
w_requests_rate,
per_disk_rate
)
return i_cache[instance.id]
@abc.abstractmethod
def _get_samples(self, instance, disk_rates_info):
"""Return one or more Sample."""
def get_samples(self, manager, cache, resources):
self._inspection_duration = self._record_poll_time()
for instance in resources:
try:
disk_rates_info = self._populate_cache(
self.inspector,
cache,
instance,
)
for disk_rate in self._get_samples(instance, disk_rates_info):
yield disk_rate
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('%(inspector)s does not provide data for '
'%(pollster)s'),
{'inspector': self.inspector.__class__.__name__,
'pollster': self.__class__.__name__})
except Exception as err:
instance_name = util.instance_name(instance)
LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
{'name': instance_name, 'error': err})
class ReadBytesRatePollster(_DiskRatesPollsterBase):
def _get_samples(self, instance, disk_rates_info):
return [util.make_sample_from_instance(
instance,
name='disk.read.bytes.rate',
type=sample.TYPE_GAUGE,
unit='B/s',
volume=disk_rates_info.read_bytes_rate,
additional_metadata={
'device': disk_rates_info.per_disk_rate[
'read_bytes_rate'].keys()},
)]
class PerDeviceReadBytesRatePollster(_DiskRatesPollsterBase):
def _get_samples(self, instance, disk_rates_info):
samples = []
for disk, value in six.iteritems(disk_rates_info.per_disk_rate[
'read_bytes_rate']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.read.bytes.rate',
type=sample.TYPE_GAUGE,
unit='B/s',
volume=value,
resource_id="%s-%s" % (instance.id, disk),
))
return samples
class ReadRequestsRatePollster(_DiskRatesPollsterBase):
def _get_samples(self, instance, disk_rates_info):
return [util.make_sample_from_instance(
instance,
name='disk.read.requests.rate',
type=sample.TYPE_GAUGE,
unit='requests/s',
volume=disk_rates_info.read_requests_rate,
additional_metadata={
'device': disk_rates_info.per_disk_rate[
'read_requests_rate'].keys()},
)]
class PerDeviceReadRequestsRatePollster(_DiskRatesPollsterBase):
def _get_samples(self, instance, disk_rates_info):
samples = []
for disk, value in six.iteritems(disk_rates_info.per_disk_rate[
'read_requests_rate']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.read.requests.rate',
type=sample.TYPE_GAUGE,
unit='requests/s',
volume=value,
resource_id="%s-%s" % (instance.id, disk),
))
return samples
class WriteBytesRatePollster(_DiskRatesPollsterBase):
def _get_samples(self, instance, disk_rates_info):
return [util.make_sample_from_instance(
instance,
name='disk.write.bytes.rate',
type=sample.TYPE_GAUGE,
unit='B/s',
volume=disk_rates_info.write_bytes_rate,
additional_metadata={
'device': disk_rates_info.per_disk_rate[
'write_bytes_rate'].keys()},
)]
class PerDeviceWriteBytesRatePollster(_DiskRatesPollsterBase):
def _get_samples(self, instance, disk_rates_info):
samples = []
for disk, value in six.iteritems(disk_rates_info.per_disk_rate[
'write_bytes_rate']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.write.bytes.rate',
type=sample.TYPE_GAUGE,
unit='B/s',
volume=value,
resource_id="%s-%s" % (instance.id, disk),
))
return samples
class WriteRequestsRatePollster(_DiskRatesPollsterBase):
def _get_samples(self, instance, disk_rates_info):
return [util.make_sample_from_instance(
instance,
name='disk.write.requests.rate',
type=sample.TYPE_GAUGE,
unit='requests/s',
volume=disk_rates_info.write_requests_rate,
additional_metadata={
'device': disk_rates_info.per_disk_rate[
'write_requests_rate'].keys()},
)]
class PerDeviceWriteRequestsRatePollster(_DiskRatesPollsterBase):
def _get_samples(self, instance, disk_rates_info):
samples = []
for disk, value in six.iteritems(disk_rates_info.per_disk_rate[
'write_requests_rate']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.write.requests.rate',
type=sample.TYPE_GAUGE,
unit='requests/s',
volume=value,
resource_id="%s-%s" % (instance.id, disk),
))
return samples
@six.add_metaclass(abc.ABCMeta)
class _DiskLatencyPollsterBase(pollsters.BaseComputePollster):
CACHE_KEY_DISK_LATENCY = 'disk-latency'
def _populate_cache(self, inspector, cache, instance):
i_cache = cache.setdefault(self.CACHE_KEY_DISK_LATENCY, {})
if instance.id not in i_cache:
latency = 0
per_device_latency = {}
disk_rates = inspector.inspect_disk_latency(instance)
for disk, stats in disk_rates:
latency += stats.disk_latency
per_device_latency[disk.device] = (
stats.disk_latency)
per_disk_latency = {
'disk_latency': per_device_latency
}
i_cache[instance.id] = DiskLatencyData(
latency,
per_disk_latency
)
return i_cache[instance.id]
@abc.abstractmethod
def _get_samples(self, instance, disk_rates_info):
"""Return one or more Sample."""
def get_samples(self, manager, cache, resources):
for instance in resources:
try:
disk_latency_info = self._populate_cache(
self.inspector,
cache,
instance,
)
for disk_latency in self._get_samples(instance,
disk_latency_info):
yield disk_latency
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('%(inspector)s does not provide data for '
'%(pollster)s'),
{'inspector': self.inspector.__class__.__name__,
'pollster': self.__class__.__name__})
except Exception as err:
instance_name = util.instance_name(instance)
LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
{'name': instance_name, 'error': err})
class DiskLatencyPollster(_DiskLatencyPollsterBase):
def _get_samples(self, instance, disk_latency_info):
return [util.make_sample_from_instance(
instance,
name='disk.latency',
type=sample.TYPE_GAUGE,
unit='ms',
volume=disk_latency_info.disk_latency / 1000
)]
class PerDeviceDiskLatencyPollster(_DiskLatencyPollsterBase):
def _get_samples(self, instance, disk_latency_info):
samples = []
for disk, value in six.iteritems(disk_latency_info.per_disk_latency[
'disk_latency']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.latency',
type=sample.TYPE_GAUGE,
unit='ms',
volume=value / 1000,
resource_id="%s-%s" % (instance.id, disk)
))
return samples
class _DiskIOPSPollsterBase(pollsters.BaseComputePollster):
CACHE_KEY_DISK_IOPS = 'disk-iops'
def _populate_cache(self, inspector, cache, instance):
i_cache = cache.setdefault(self.CACHE_KEY_DISK_IOPS, {})
if instance.id not in i_cache:
iops = 0
per_device_iops = {}
disk_iops_count = inspector.inspect_disk_iops(instance)
for disk, stats in disk_iops_count:
iops += stats.iops_count
per_device_iops[disk.device] = (stats.iops_count)
per_disk_iops = {
'iops_count': per_device_iops
}
i_cache[instance.id] = DiskIOPSData(
iops,
per_disk_iops
)
return i_cache[instance.id]
@abc.abstractmethod
def _get_samples(self, instance, disk_rates_info):
"""Return one or more Sample."""
def get_samples(self, manager, cache, resources):
for instance in resources:
try:
disk_iops_info = self._populate_cache(
self.inspector,
cache,
instance,
)
for disk_iops in self._get_samples(instance,
disk_iops_info):
yield disk_iops
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('%(inspector)s does not provide data for '
'%(pollster)s'),
{'inspector': self.inspector.__class__.__name__,
'pollster': self.__class__.__name__})
except Exception as err:
instance_name = util.instance_name(instance)
LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
{'name': instance_name, 'error': err})
class DiskIOPSPollster(_DiskIOPSPollsterBase):
def _get_samples(self, instance, disk_iops_info):
return [util.make_sample_from_instance(
instance,
name='disk.iops',
type=sample.TYPE_GAUGE,
unit='count/s',
volume=disk_iops_info.iops_count
)]
class PerDeviceDiskIOPSPollster(_DiskIOPSPollsterBase):
def _get_samples(self, instance, disk_iops_info):
samples = []
for disk, value in six.iteritems(disk_iops_info.per_disk_iops[
'iops_count']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.iops',
type=sample.TYPE_GAUGE,
unit='count/s',
volume=value,
resource_id="%s-%s" % (instance.id, disk)
))
return samples
@six.add_metaclass(abc.ABCMeta)
class _DiskInfoPollsterBase(pollsters.BaseComputePollster):
CACHE_KEY_DISK_INFO = 'diskinfo'
def _populate_cache(self, inspector, cache, instance):
i_cache = cache.setdefault(self.CACHE_KEY_DISK_INFO, {})
if instance.id not in i_cache:
all_capacity = 0
all_allocation = 0
all_physical = 0
per_disk_capacity = {}
per_disk_allocation = {}
per_disk_physical = {}
disk_info = inspector.inspect_disk_info(
instance)
for disk, info in disk_info:
all_capacity += info.capacity
all_allocation += info.allocation
all_physical += info.physical
per_disk_capacity[disk.device] = info.capacity
per_disk_allocation[disk.device] = info.allocation
per_disk_physical[disk.device] = info.physical
per_disk_info = {
'capacity': per_disk_capacity,
'allocation': per_disk_allocation,
'physical': per_disk_physical,
}
i_cache[instance.id] = DiskInfoData(
all_capacity,
all_allocation,
all_physical,
per_disk_info
)
return i_cache[instance.id]
@abc.abstractmethod
def _get_samples(self, instance, disk_info):
"""Return one or more Sample."""
def get_samples(self, manager, cache, resources):
for instance in resources:
try:
disk_size_info = self._populate_cache(
self.inspector,
cache,
instance,
)
for disk_info in self._get_samples(instance, disk_size_info):
yield disk_info
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except virt_inspector.InstanceShutOffException as e:
LOG.warn(_LW('Instance %(instance_id)s was shut off while '
'getting samples of %(pollster)s: %(exc)s'),
{'instance_id': instance.id,
'pollster': self.__class__.__name__, 'exc': e})
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
                LOG.debug(_('%(inspector)s does not provide data for '
                            '%(pollster)s'),
                          {'inspector': self.inspector.__class__.__name__,
                           'pollster': self.__class__.__name__})
            except Exception as err:
                instance_name = util.instance_name(instance)
                LOG.exception(_('Ignoring instance %(name)s '
                                '(%(instance_id)s): %(error)s'),
                              {'name': instance_name,
                               'instance_id': instance.id,
                               'error': err})
class CapacityPollster(_DiskInfoPollsterBase):
def _get_samples(self, instance, disk_info):
return [util.make_sample_from_instance(
instance,
name='disk.capacity',
type=sample.TYPE_GAUGE,
unit='B',
volume=disk_info.capacity,
additional_metadata={
'device': disk_info.per_disk_info[
'capacity'].keys()},
)]
class PerDeviceCapacityPollster(_DiskInfoPollsterBase):
def _get_samples(self, instance, disk_info):
samples = []
for disk, value in six.iteritems(disk_info.per_disk_info[
'capacity']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.capacity',
type=sample.TYPE_GAUGE,
unit='B',
volume=value,
resource_id="%s-%s" % (instance.id, disk),
))
return samples
class AllocationPollster(_DiskInfoPollsterBase):
def _get_samples(self, instance, disk_info):
return [util.make_sample_from_instance(
instance,
name='disk.allocation',
type=sample.TYPE_GAUGE,
unit='B',
volume=disk_info.allocation,
additional_metadata={
'device': disk_info.per_disk_info[
'allocation'].keys()},
)]
class PerDeviceAllocationPollster(_DiskInfoPollsterBase):
def _get_samples(self, instance, disk_info):
samples = []
for disk, value in six.iteritems(disk_info.per_disk_info[
'allocation']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.allocation',
type=sample.TYPE_GAUGE,
unit='B',
volume=value,
resource_id="%s-%s" % (instance.id, disk),
))
return samples
class PhysicalPollster(_DiskInfoPollsterBase):
def _get_samples(self, instance, disk_info):
return [util.make_sample_from_instance(
instance,
name='disk.usage',
type=sample.TYPE_GAUGE,
unit='B',
volume=disk_info.physical,
additional_metadata={
'device': disk_info.per_disk_info[
'physical'].keys()},
)]
class PerDevicePhysicalPollster(_DiskInfoPollsterBase):
def _get_samples(self, instance, disk_info):
samples = []
for disk, value in six.iteritems(disk_info.per_disk_info[
'physical']):
samples.append(util.make_sample_from_instance(
instance,
name='disk.device.usage',
type=sample.TYPE_GAUGE,
unit='B',
volume=value,
resource_id="%s-%s" % (instance.id, disk),
))
return samples
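
# The pollster bases above share one pattern: the expensive inspector call is
# made at most once per polling cycle and memoized in the shared `cache`
# dict, keyed by a pollster-family key and then by instance id, so the
# summary and per-device pollsters reuse the same data. A standalone sketch
# of that pattern (FakeInspector and its numbers are hypothetical, for
# illustration only):
import collections

DiskIOPSData = collections.namedtuple('DiskIOPSData',
                                      ['iops_count', 'per_disk_iops'])


class FakeInspector(object):
    calls = 0

    def inspect_disk_iops(self, instance_id):
        FakeInspector.calls += 1
        return [('vda', 120), ('vdb', 30)]


def populate_cache(inspector, cache, instance_id):
    i_cache = cache.setdefault('disk-iops', {})
    if instance_id not in i_cache:
        per_device = dict(inspector.inspect_disk_iops(instance_id))
        i_cache[instance_id] = DiskIOPSData(sum(per_device.values()),
                                            {'iops_count': per_device})
    return i_cache[instance_id]


cache, inspector = {}, FakeInspector()
for _pollster in range(2):  # two pollsters polling the same instance
    data = populate_cache(inspector, cache, 'instance-1')
assert data.iops_count == 150 and FakeInspector.calls == 1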


@@ -1,49 +0,0 @@
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer import sample
class InstancePollster(pollsters.BaseComputePollster):
@staticmethod
def get_samples(manager, cache, resources):
for instance in resources:
yield util.make_sample_from_instance(
instance,
name='instance',
type=sample.TYPE_GAUGE,
unit='instance',
volume=1,
)
class InstanceFlavorPollster(pollsters.BaseComputePollster):
@staticmethod
def get_samples(manager, cache, resources):
for instance in resources:
yield util.make_sample_from_instance(
instance,
# Use the "meter name + variable" syntax
name='instance:%s' %
instance.flavor['name'],
type=sample.TYPE_GAUGE,
unit='instance',
volume=1,
)
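
# InstanceFlavorPollster above uses the "meter name + variable" syntax, so
# each flavor yields its own gauge meter. A minimal sketch of the resulting
# meter name (the flavor dict is a hypothetical example):
flavor = {'name': 'm1.small'}
assert 'instance:%s' % flavor['name'] == 'instance:m1.small'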


@@ -1,110 +0,0 @@
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
import ceilometer
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _, _LW, _LE
from ceilometer import sample
LOG = log.getLogger(__name__)
class MemoryUsagePollster(pollsters.BaseComputePollster):
def get_samples(self, manager, cache, resources):
self._inspection_duration = self._record_poll_time()
for instance in resources:
LOG.debug(_('Checking memory usage for instance %s'), instance.id)
try:
memory_info = self.inspector.inspect_memory_usage(
instance, self._inspection_duration)
LOG.debug(_("MEMORY USAGE: %(instance)s %(usage)f"),
({'instance': instance.__dict__,
'usage': memory_info.usage}))
yield util.make_sample_from_instance(
instance,
name='memory.usage',
type=sample.TYPE_GAUGE,
unit='MB',
volume=memory_info.usage,
)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except virt_inspector.InstanceShutOffException as e:
LOG.warn(_LW('Instance %(instance_id)s was shut off while '
'getting samples of %(pollster)s: %(exc)s'),
{'instance_id': instance.id,
'pollster': self.__class__.__name__, 'exc': e})
except virt_inspector.NoDataException as e:
LOG.warn(_LW('Cannot inspect data of %(pollster)s for '
'%(instance_id)s, non-fatal reason: %(exc)s'),
{'pollster': self.__class__.__name__,
'instance_id': instance.id, 'exc': e})
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
                LOG.debug(_('Obtaining Memory Usage is not implemented '
                            'for %s'), self.inspector.__class__.__name__)
except Exception as err:
LOG.exception(_('Could not get Memory Usage for '
'%(id)s: %(e)s'), {'id': instance.id,
'e': err})
class MemoryResidentPollster(pollsters.BaseComputePollster):
def get_samples(self, manager, cache, resources):
self._inspection_duration = self._record_poll_time()
for instance in resources:
LOG.debug(_('Checking resident memory for instance %s'),
instance.id)
try:
memory_info = self.inspector.inspect_memory_resident(
instance, self._inspection_duration)
LOG.debug(_("RESIDENT MEMORY: %(instance)s %(resident)f"),
({'instance': instance.__dict__,
'resident': memory_info.resident}))
yield util.make_sample_from_instance(
instance,
name='memory.resident',
type=sample.TYPE_GAUGE,
unit='MB',
volume=memory_info.resident,
)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except virt_inspector.InstanceShutOffException as e:
LOG.warn(_LW('Instance %(instance_id)s was shut off while '
'getting samples of %(pollster)s: %(exc)s'),
{'instance_id': instance.id,
'pollster': self.__class__.__name__, 'exc': e})
except virt_inspector.NoDataException as e:
LOG.warn(_LW('Cannot inspect data of %(pollster)s for '
'%(instance_id)s, non-fatal reason: %(exc)s'),
{'pollster': self.__class__.__name__,
'instance_id': instance.id, 'exc': e})
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('Obtaining Resident Memory is not implemented'
' for %s'), self.inspector.__class__.__name__)
except Exception as err:
LOG.exception(_LE('Could not get Resident Memory Usage for '
'%(id)s: %(e)s'), {'id': instance.id,
'e': err})
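
# Both memory pollsters use the same error ladder: instance-not-found and
# not-implemented cases are logged at debug level, shut-off and no-data cases
# at warning, and any other failure is logged without aborting the cycle. A
# minimal sketch of that "one bad instance never kills the poll" pattern (the
# exception class and instance ids are hypothetical stand-ins):
class InstanceNotFound(Exception):
    pass


def get_samples(instances):
    for inst in instances:
        try:
            if inst == 'gone':
                raise InstanceNotFound(inst)
            yield ('memory.usage', inst, 512.0)
        except InstanceNotFound:
            continue  # deleted mid-poll: skip it, keep polling the rest


assert [s[1] for s in get_samples(['a', 'gone', 'b'])] == ['a', 'b']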


@@ -1,219 +0,0 @@
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
from oslo_log import log
from oslo_utils import timeutils
import ceilometer
from ceilometer.compute import pollsters
from ceilometer.compute.pollsters import util
from ceilometer.compute import util as compute_util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _, _LW
from ceilometer import sample
LOG = log.getLogger(__name__)
class _Base(pollsters.BaseComputePollster):
NET_USAGE_MESSAGE = ' '.join(["NETWORK USAGE:", "%s %s:", "read-bytes=%d",
"write-bytes=%d"])
@staticmethod
def make_vnic_sample(instance, name, type, unit, volume, vnic_data):
metadata = copy.copy(vnic_data)
resource_metadata = dict(zip(metadata._fields, metadata))
resource_metadata['instance_id'] = instance.id
resource_metadata['instance_type'] = (instance.flavor['id'] if
instance.flavor else None)
compute_util.add_reserved_user_metadata(instance.metadata,
resource_metadata)
if vnic_data.fref is not None:
rid = vnic_data.fref
else:
instance_name = util.instance_name(instance)
rid = "%s-%s-%s" % (instance_name, instance.id, vnic_data.name)
return sample.Sample(
name=name,
type=type,
unit=unit,
volume=volume,
user_id=instance.user_id,
project_id=instance.tenant_id,
resource_id=rid,
timestamp=timeutils.isotime(),
resource_metadata=resource_metadata
)
CACHE_KEY_VNIC = 'vnics'
def _get_vnic_info(self, inspector, instance):
return inspector.inspect_vnics(instance)
@staticmethod
def _get_rx_info(info):
return info.rx_bytes
@staticmethod
def _get_tx_info(info):
return info.tx_bytes
def _get_vnics_for_instance(self, cache, inspector, instance):
i_cache = cache.setdefault(self.CACHE_KEY_VNIC, {})
if instance.id not in i_cache:
i_cache[instance.id] = list(
self._get_vnic_info(inspector, instance)
)
return i_cache[instance.id]
def get_samples(self, manager, cache, resources):
self._inspection_duration = self._record_poll_time()
for instance in resources:
instance_name = util.instance_name(instance)
LOG.debug(_('checking net info for instance %s'), instance.id)
try:
vnics = self._get_vnics_for_instance(
cache,
self.inspector,
instance,
)
for vnic, info in vnics:
LOG.debug(self.NET_USAGE_MESSAGE, instance_name,
vnic.name, self._get_rx_info(info),
self._get_tx_info(info))
yield self._get_sample(instance, vnic, info)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except virt_inspector.InstanceShutOffException as e:
LOG.warn(_LW('Instance %(instance_id)s was shut off while '
'getting samples of %(pollster)s: %(exc)s'),
{'instance_id': instance.id,
'pollster': self.__class__.__name__, 'exc': e})
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
                LOG.debug(_('%(inspector)s does not provide data for '
                            '%(pollster)s'),
{'inspector': self.inspector.__class__.__name__,
'pollster': self.__class__.__name__})
except Exception as err:
LOG.exception(_('Ignoring instance %(name)s: %(error)s'),
{'name': instance_name, 'error': err})
class _RateBase(_Base):
NET_USAGE_MESSAGE = ' '.join(["NETWORK RATE:", "%s %s:",
"read-bytes-rate=%d",
"write-bytes-rate=%d"])
CACHE_KEY_VNIC = 'vnic-rates'
def _get_vnic_info(self, inspector, instance):
return inspector.inspect_vnic_rates(instance,
self._inspection_duration)
@staticmethod
def _get_rx_info(info):
return info.rx_bytes_rate
@staticmethod
def _get_tx_info(info):
return info.tx_bytes_rate
class IncomingBytesPollster(_Base):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.incoming.bytes',
type=sample.TYPE_CUMULATIVE,
unit='B',
volume=info.rx_bytes,
vnic_data=vnic,
)
class IncomingPacketsPollster(_Base):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.incoming.packets',
type=sample.TYPE_CUMULATIVE,
unit='packet',
volume=info.rx_packets,
vnic_data=vnic,
)
class OutgoingBytesPollster(_Base):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.outgoing.bytes',
type=sample.TYPE_CUMULATIVE,
unit='B',
volume=info.tx_bytes,
vnic_data=vnic,
)
class OutgoingPacketsPollster(_Base):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.outgoing.packets',
type=sample.TYPE_CUMULATIVE,
unit='packet',
volume=info.tx_packets,
vnic_data=vnic,
)
class IncomingBytesRatePollster(_RateBase):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.incoming.bytes.rate',
type=sample.TYPE_GAUGE,
unit='B/s',
volume=info.rx_bytes_rate,
vnic_data=vnic,
)
class OutgoingBytesRatePollster(_RateBase):
def _get_sample(self, instance, vnic, info):
return self.make_vnic_sample(
instance,
name='network.outgoing.bytes.rate',
type=sample.TYPE_GAUGE,
unit='B/s',
volume=info.tx_bytes_rate,
vnic_data=vnic,
)
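
# make_vnic_sample above keys each sample to its vNIC: the filter ref when
# one exists, otherwise "<instance_name>-<instance_id>-<vnic_name>". A sketch
# with hypothetical values:
def vnic_resource_id(instance_name, instance_id, vnic_name, fref=None):
    if fref is not None:
        return fref
    return '%s-%s-%s' % (instance_name, instance_id, vnic_name)


assert vnic_resource_id('vm1', 'uuid-1', 'tap0') == 'vm1-uuid-1-tap0'
assert vnic_resource_id('vm1', 'uuid-1', 'tap0', 'nwfilter-42') == 'nwfilter-42'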


@@ -1,94 +0,0 @@
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_utils import timeutils
from ceilometer.compute import util as compute_util
from ceilometer import sample
INSTANCE_PROPERTIES = [
# Identity properties
'reservation_id',
# Type properties
'architecture',
'OS-EXT-AZ:availability_zone',
'kernel_id',
'os_type',
'ramdisk_id',
]
def _get_metadata_from_object(instance):
"""Return a metadata dictionary for the instance."""
metadata = {
'display_name': instance.name,
'name': getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', u''),
'instance_type': (instance.flavor['id'] if instance.flavor else None),
'host': instance.hostId,
'flavor': instance.flavor,
'status': instance.status.lower(),
}
# Image properties
if instance.image:
metadata['image'] = instance.image
metadata['image_ref'] = instance.image['id']
# Images that come through the conductor API in the nova notifier
# plugin will not have links.
if instance.image.get('links'):
metadata['image_ref_url'] = instance.image['links'][0]['href']
else:
metadata['image_ref_url'] = None
else:
metadata['image'] = None
metadata['image_ref'] = None
metadata['image_ref_url'] = None
for name in INSTANCE_PROPERTIES:
if hasattr(instance, name):
metadata[name] = getattr(instance, name)
metadata['vcpus'] = instance.flavor['vcpus']
metadata['memory_mb'] = instance.flavor['ram']
metadata['disk_gb'] = instance.flavor['disk']
metadata['ephemeral_gb'] = instance.flavor['ephemeral']
metadata['root_gb'] = (int(metadata['disk_gb']) -
int(metadata['ephemeral_gb']))
return compute_util.add_reserved_user_metadata(instance.metadata, metadata)
def make_sample_from_instance(instance, name, type, unit, volume,
resource_id=None, additional_metadata=None):
additional_metadata = additional_metadata or {}
resource_metadata = _get_metadata_from_object(instance)
resource_metadata.update(additional_metadata)
return sample.Sample(
name=name,
type=type,
unit=unit,
volume=volume,
user_id=instance.user_id,
project_id=instance.tenant_id,
resource_id=resource_id or instance.id,
timestamp=timeutils.isotime(),
resource_metadata=resource_metadata,
)
def instance_name(instance):
"""Shortcut to get instance name."""
return getattr(instance, 'OS-EXT-SRV-ATTR:instance_name', None)
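
# _get_metadata_from_object derives root_gb by subtracting the ephemeral disk
# from the flavor's total disk. Worked through with a hypothetical flavor:
flavor = {'vcpus': 2, 'ram': 4096, 'disk': 40, 'ephemeral': 10}
root_gb = int(flavor['disk']) - int(flavor['ephemeral'])
assert root_gb == 30  # disk_gb minus ephemeral_gb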


@@ -1,66 +0,0 @@
#
# Copyright 2014 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
import six
# The config options below collect metadata that the user defined in Nova (or
# elsewhere) and store it in the Sample for later use according to the user's
# requirements, e.g. as OpenTSDB tags for metrics.
OPTS = [
cfg.ListOpt('reserved_metadata_namespace',
default=['metering.'],
help='List of metadata prefixes reserved for metering use.'),
cfg.IntOpt('reserved_metadata_length',
default=256,
help='Limit on length of reserved metadata values.'),
cfg.ListOpt('reserved_metadata_keys',
default=[],
                help='List of metadata keys reserved for metering use. '
                     'These keys are in addition to the ones included '
                     'in the namespace.'),
]
cfg.CONF.register_opts(OPTS)
def add_reserved_user_metadata(src_metadata, dest_metadata):
limit = cfg.CONF.reserved_metadata_length
user_metadata = {}
for prefix in cfg.CONF.reserved_metadata_namespace:
md = dict(
(k[len(prefix):].replace('.', '_'),
v[:limit] if isinstance(v, six.string_types) else v)
for k, v in src_metadata.items()
if (k.startswith(prefix) and
k[len(prefix):].replace('.', '_') not in dest_metadata)
)
user_metadata.update(md)
for metadata_key in cfg.CONF.reserved_metadata_keys:
md = dict(
(k.replace('.', '_'),
v[:limit] if isinstance(v, six.string_types) else v)
for k, v in src_metadata.items()
if (k == metadata_key and
k.replace('.', '_') not in dest_metadata)
)
user_metadata.update(md)
if user_metadata:
dest_metadata['user_metadata'] = user_metadata
return dest_metadata
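
# A standalone sketch of the namespace-stripping step above, with the config
# values inlined (namespace 'metering.', length limit 256) rather than read
# from cfg.CONF, and without the dest_metadata collision check:
def strip_reserved(src_metadata, prefix='metering.', limit=256):
    return dict((k[len(prefix):].replace('.', '_'),
                 v[:limit] if isinstance(v, str) else v)
                for k, v in src_metadata.items() if k.startswith(prefix))


assert (strip_reserved({'metering.stack.id': 'abc', 'other': 'x'}) ==
        {'stack_id': 'abc'})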


@@ -1,101 +0,0 @@
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of Inspector abstraction for Hyper-V"""
from oslo_config import cfg
from oslo_log import log
from oslo_utils import units
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt.hyperv import utilsv2
from ceilometer.compute.virt import inspector as virt_inspector
CONF = cfg.CONF
LOG = log.getLogger(__name__)
class HyperVInspector(virt_inspector.Inspector):
def __init__(self):
super(HyperVInspector, self).__init__()
self._utils = utilsv2.UtilsV2()
def inspect_cpus(self, instance):
instance_name = util.instance_name(instance)
(cpu_clock_used,
cpu_count, uptime) = self._utils.get_cpu_metrics(instance_name)
host_cpu_clock, host_cpu_count = self._utils.get_host_cpu_info()
cpu_percent_used = (cpu_clock_used /
float(host_cpu_clock * cpu_count))
# Nanoseconds
cpu_time = (int(uptime * cpu_percent_used) * units.k)
return virt_inspector.CPUStats(number=cpu_count, time=cpu_time)
def inspect_memory_usage(self, instance, duration=None):
instance_name = util.instance_name(instance)
usage = self._utils.get_memory_metrics(instance_name)
return virt_inspector.MemoryUsageStats(usage=usage)
def inspect_vnics(self, instance):
instance_name = util.instance_name(instance)
for vnic_metrics in self._utils.get_vnic_metrics(instance_name):
interface = virt_inspector.Interface(
name=vnic_metrics["element_name"],
mac=vnic_metrics["address"],
fref=None,
parameters=None)
stats = virt_inspector.InterfaceStats(
rx_bytes=vnic_metrics['rx_mb'] * units.Mi,
rx_packets=0,
tx_bytes=vnic_metrics['tx_mb'] * units.Mi,
tx_packets=0)
yield (interface, stats)
def inspect_disks(self, instance):
instance_name = util.instance_name(instance)
for disk_metrics in self._utils.get_disk_metrics(instance_name):
disk = virt_inspector.Disk(device=disk_metrics['instance_id'])
stats = virt_inspector.DiskStats(
read_requests=0,
# Return bytes
read_bytes=disk_metrics['read_mb'] * units.Mi,
write_requests=0,
write_bytes=disk_metrics['write_mb'] * units.Mi,
errors=0)
yield (disk, stats)
def inspect_disk_latency(self, instance):
instance_name = util.instance_name(instance)
for disk_metrics in self._utils.get_disk_latency_metrics(
instance_name):
disk = virt_inspector.Disk(device=disk_metrics['instance_id'])
stats = virt_inspector.DiskLatencyStats(
disk_latency=disk_metrics['disk_latency'])
yield (disk, stats)
def inspect_disk_iops(self, instance):
instance_name = util.instance_name(instance)
for disk_metrics in self._utils.get_disk_iops_count(instance_name):
disk = virt_inspector.Disk(device=disk_metrics['instance_id'])
stats = virt_inspector.DiskIOPSStats(
iops_count=disk_metrics['iops_count'])
yield (disk, stats)
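
# inspect_cpus above turns Hyper-V's aggregated CPU utilization into a
# cumulative CPU time in nanoseconds: the fraction of host capacity used,
# times uptime, times units.k. Worked through with hypothetical numbers:
cpu_clock_used, cpu_count, uptime_ms = 1500, 2, 60000
host_cpu_clock = 3000  # MHz, as returned by get_host_cpu_info()
cpu_percent_used = cpu_clock_used / float(host_cpu_clock * cpu_count)  # 0.25
cpu_time_ns = int(uptime_ms * cpu_percent_used) * 1000  # units.k == 1000
assert cpu_time_ns == 15000000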


@@ -1,253 +0,0 @@
# Copyright 2013 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Utility class for VM related operations.
Based on the "root/virtualization/v2" namespace available starting with
Hyper-V Server / Windows Server 2012.
"""
import sys
if sys.platform == 'win32':
import wmi
from oslo_config import cfg
from oslo_log import log as logging
from ceilometer.compute.virt import inspector
from ceilometer.i18n import _
CONF = cfg.CONF
LOG = logging.getLogger(__name__)
class HyperVException(inspector.InspectorException):
pass
class UtilsV2(object):
_VIRTUAL_SYSTEM_TYPE_REALIZED = 'Microsoft:Hyper-V:System:Realized'
_PROC_SETTING = 'Msvm_ProcessorSettingData'
_MEMORY_SETTING = "Msvm_MemorySettingData"
_SYNTH_ETH_PORT = 'Msvm_SyntheticEthernetPortSettingData'
_ETH_PORT_ALLOC = 'Msvm_EthernetPortAllocationSettingData'
_PORT_ACL_SET_DATA = 'Msvm_EthernetSwitchPortAclSettingData'
_STORAGE_ALLOC = 'Msvm_StorageAllocationSettingData'
_VS_SETTING_DATA = 'Msvm_VirtualSystemSettingData'
_METRICS_ME = 'Msvm_MetricForME'
_BASE_METRICS_VALUE = 'Msvm_BaseMetricValue'
_CPU_METRIC_NAME = 'Aggregated Average CPU Utilization'
_MEMORY_METRIC_NAME = 'Aggregated Average Memory Utilization'
_NET_IN_METRIC_NAME = 'Filtered Incoming Network Traffic'
_NET_OUT_METRIC_NAME = 'Filtered Outgoing Network Traffic'
# Disk metrics are supported from Hyper-V 2012 R2
_DISK_RD_METRIC_NAME = 'Disk Data Read'
_DISK_WR_METRIC_NAME = 'Disk Data Written'
_DISK_LATENCY_METRIC_NAME = 'Average Disk Latency'
_DISK_IOPS_METRIC_NAME = 'Average Normalized Disk Throughput'
def __init__(self, host='.'):
if sys.platform == 'win32':
self._init_hyperv_wmi_conn(host)
self._init_cimv2_wmi_conn(host)
self._host_cpu_info = None
def _init_hyperv_wmi_conn(self, host):
self._conn = wmi.WMI(moniker='//%s/root/virtualization/v2' % host)
def _init_cimv2_wmi_conn(self, host):
self._conn_cimv2 = wmi.WMI(moniker='//%s/root/cimv2' % host)
def get_host_cpu_info(self):
if not self._host_cpu_info:
host_cpus = self._conn_cimv2.Win32_Processor()
self._host_cpu_info = (host_cpus[0].MaxClockSpeed, len(host_cpus))
return self._host_cpu_info
def get_all_vms(self):
vms = [(v.ElementName, v.Name) for v in
self._conn.Msvm_ComputerSystem(['ElementName', 'Name'],
Caption="Virtual Machine")]
return vms
def get_cpu_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
cpu_sd = self._get_vm_resources(vm, self._PROC_SETTING)[0]
cpu_metrics_def = self._get_metric_def(self._CPU_METRIC_NAME)
cpu_metric_aggr = self._get_metrics(vm, cpu_metrics_def)
cpu_used = 0
if cpu_metric_aggr:
cpu_used = int(cpu_metric_aggr[0].MetricValue)
return (cpu_used,
int(cpu_sd.VirtualQuantity),
int(vm.OnTimeInMilliseconds))
def get_memory_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
memory_def = self._get_metric_def(self._MEMORY_METRIC_NAME)
metric_memory = self._get_metrics(vm, memory_def)
memory_usage = 0
if metric_memory:
memory_usage = int(metric_memory[0].MetricValue)
return memory_usage
def get_vnic_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
ports = self._get_vm_resources(vm, self._ETH_PORT_ALLOC)
vnics = self._get_vm_resources(vm, self._SYNTH_ETH_PORT)
metric_def_in = self._get_metric_def(self._NET_IN_METRIC_NAME)
metric_def_out = self._get_metric_def(self._NET_OUT_METRIC_NAME)
for port in ports:
vnic = [v for v in vnics if port.Parent == v.path_()][0]
metric_value_instances = self._get_metric_value_instances(
port.associators(wmi_result_class=self._PORT_ACL_SET_DATA),
self._BASE_METRICS_VALUE)
metric_values = self._sum_metric_values_by_defs(
metric_value_instances, [metric_def_in, metric_def_out])
yield {
'rx_mb': metric_values[0],
'tx_mb': metric_values[1],
'element_name': vnic.ElementName,
'address': vnic.Address
}
def get_disk_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
metric_def_r = self._get_metric_def(self._DISK_RD_METRIC_NAME)
metric_def_w = self._get_metric_def(self._DISK_WR_METRIC_NAME)
disks = self._get_vm_resources(vm, self._STORAGE_ALLOC)
for disk in disks:
metric_values = self._get_metric_values(
disk, [metric_def_r, metric_def_w])
            # This is e.g. the VHD file location
if disk.HostResource:
host_resource = disk.HostResource[0]
yield {
# Values are in megabytes
'read_mb': metric_values[0],
'write_mb': metric_values[1],
'instance_id': disk.InstanceID,
'host_resource': host_resource
}
def get_disk_latency_metrics(self, vm_name):
vm = self._lookup_vm(vm_name)
metric_latency_def = self._get_metric_def(
self._DISK_LATENCY_METRIC_NAME)
disks = self._get_vm_resources(vm, self._STORAGE_ALLOC)
for disk in disks:
metric_values = self._get_metric_values(
disk, [metric_latency_def])
yield {
'disk_latency': metric_values[0],
'instance_id': disk.InstanceID,
}
def get_disk_iops_count(self, vm_name):
vm = self._lookup_vm(vm_name)
metric_def_iops = self._get_metric_def(self._DISK_IOPS_METRIC_NAME)
disks = self._get_vm_resources(vm, self._STORAGE_ALLOC)
for disk in disks:
metric_values = self._get_metric_values(
disk, [metric_def_iops])
yield {
'iops_count': metric_values[0],
'instance_id': disk.InstanceID,
}
@staticmethod
def _sum_metric_values(metrics):
tot_metric_val = 0
for metric in metrics:
tot_metric_val += int(metric.MetricValue)
return tot_metric_val
def _sum_metric_values_by_defs(self, element_metrics, metric_defs):
metric_values = []
for metric_def in metric_defs:
if metric_def:
metrics = self._filter_metrics(element_metrics, metric_def)
metric_values.append(self._sum_metric_values(metrics))
else:
# In case the metric is not defined on this host
metric_values.append(0)
return metric_values
@staticmethod
def _get_metric_value_instances(elements, result_class):
instances = []
for el in elements:
associators = el.associators(wmi_result_class=result_class)
if associators:
instances.append(associators[0])
return instances
def _get_metric_values(self, element, metric_defs):
element_metrics = element.associators(
wmi_association_class=self._METRICS_ME)
return self._sum_metric_values_by_defs(element_metrics, metric_defs)
def _lookup_vm(self, vm_name):
vms = self._conn.Msvm_ComputerSystem(ElementName=vm_name)
n = len(vms)
if n == 0:
raise inspector.InstanceNotFoundException(
_('VM %s not found on Hyper-V') % vm_name)
elif n > 1:
raise HyperVException(_('Duplicate VM name found: %s') % vm_name)
else:
return vms[0]
def _get_metrics(self, element, metric_def):
return self._filter_metrics(
element.associators(
wmi_association_class=self._METRICS_ME), metric_def)
@staticmethod
def _filter_metrics(all_metrics, metric_def):
return [v for v in all_metrics if
v.MetricDefinitionId == metric_def.Id]
def _get_metric_def(self, metric_def):
metric = self._conn.CIM_BaseMetricDefinition(ElementName=metric_def)
if metric:
return metric[0]
def _get_vm_setting_data(self, vm):
vm_settings = vm.associators(
wmi_result_class=self._VS_SETTING_DATA)
# Avoid snapshots
return [s for s in vm_settings if
s.VirtualSystemType == self._VIRTUAL_SYSTEM_TYPE_REALIZED][0]
def _get_vm_resources(self, vm, resource_class):
setting_data = self._get_vm_setting_data(vm)
return setting_data.associators(wmi_result_class=resource_class)
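
# _sum_metric_values_by_defs above tolerates counters that are not defined on
# a given host (e.g. disk metrics before Hyper-V 2012 R2) by contributing 0
# instead of failing. A standalone sketch over hypothetical
# (definition id, value) pairs:
def sum_values_by_defs(metrics, metric_defs):
    return [sum(v for def_id, v in metrics if def_id == d) if d else 0
            for d in metric_defs]


metrics = [('net-in', 5), ('net-out', 7), ('net-in', 3)]
assert sum_values_by_defs(metrics, ['net-in', None]) == [8, 0]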


@@ -1,291 +0,0 @@
#
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inspector abstraction for read-only access to hypervisors."""
import collections
from oslo_config import cfg
from oslo_log import log
from stevedore import driver
import ceilometer
from ceilometer.i18n import _
OPTS = [
cfg.StrOpt('hypervisor_inspector',
default='libvirt',
help='Inspector to use for inspecting the hypervisor layer.'),
]
cfg.CONF.register_opts(OPTS)
LOG = log.getLogger(__name__)
# Named tuple representing instances.
#
# name: the name of the instance
# uuid: the UUID associated with the instance
#
Instance = collections.namedtuple('Instance', ['name', 'UUID'])
# Named tuple representing CPU statistics.
#
# number: number of CPUs
# time: cumulative CPU time
#
CPUStats = collections.namedtuple('CPUStats', ['number', 'time'])
# Named tuple representing CPU Utilization statistics.
#
# util: CPU utilization in percentage
#
CPUUtilStats = collections.namedtuple('CPUUtilStats', ['util'])
# Named tuple representing Memory usage statistics.
#
# usage: Amount of memory used
#
MemoryUsageStats = collections.namedtuple('MemoryUsageStats', ['usage'])
# Named tuple representing Resident Memory usage statistics.
#
# resident: Amount of resident memory
#
MemoryResidentStats = collections.namedtuple('MemoryResidentStats',
['resident'])
# Named tuple representing vNICs.
#
# name: the name of the vNIC
# mac: the MAC address
# fref: the filter ref
# parameters: miscellaneous parameters
#
Interface = collections.namedtuple('Interface', ['name', 'mac',
'fref', 'parameters'])
# Named tuple representing vNIC statistics.
#
# rx_bytes: number of received bytes
# rx_packets: number of received packets
# tx_bytes: number of transmitted bytes
# tx_packets: number of transmitted packets
#
InterfaceStats = collections.namedtuple('InterfaceStats',
['rx_bytes', 'rx_packets',
'tx_bytes', 'tx_packets'])
# Named tuple representing vNIC rate statistics.
#
# rx_bytes_rate: rate of received bytes
# tx_bytes_rate: rate of transmitted bytes
#
InterfaceRateStats = collections.namedtuple('InterfaceRateStats',
['rx_bytes_rate', 'tx_bytes_rate'])
# Named tuple representing disks.
#
# device: the device name for the disk
#
Disk = collections.namedtuple('Disk', ['device'])
# Named tuple representing disk statistics.
#
# read_bytes: number of bytes read
# read_requests: number of read operations
# write_bytes: number of bytes written
# write_requests: number of write operations
# errors: number of errors
#
DiskStats = collections.namedtuple('DiskStats',
['read_bytes', 'read_requests',
'write_bytes', 'write_requests',
'errors'])
# Named tuple representing disk rate statistics.
#
# read_bytes_rate: number of bytes read per second
# read_requests_rate: number of read operations per second
# write_bytes_rate: number of bytes written per second
# write_requests_rate: number of write operations per second
#
DiskRateStats = collections.namedtuple('DiskRateStats',
['read_bytes_rate',
'read_requests_rate',
'write_bytes_rate',
'write_requests_rate'])
# Named tuple representing disk latency statistics.
#
# disk_latency: average disk latency
#
DiskLatencyStats = collections.namedtuple('DiskLatencyStats',
['disk_latency'])
# Named tuple representing disk iops statistics.
#
# iops_count: number of iops per second
#
DiskIOPSStats = collections.namedtuple('DiskIOPSStats',
['iops_count'])
# Named tuple representing disk Information.
#
# capacity: capacity of the disk
# allocation: allocation of the disk
# physical: usage of the disk
DiskInfo = collections.namedtuple('DiskInfo',
['capacity',
'allocation',
'physical'])
# Exception types
#
class InspectorException(Exception):
def __init__(self, message=None):
super(InspectorException, self).__init__(message)
class InstanceNotFoundException(InspectorException):
pass
class InstanceShutOffException(InspectorException):
pass
class NoDataException(InspectorException):
pass
# Main virt inspector abstraction layering over the hypervisor API.
#
class Inspector(object):
def inspect_cpus(self, instance):
"""Inspect the CPU statistics for an instance.
:param instance: the target instance
:return: the number of CPUs and cumulative CPU time
"""
raise ceilometer.NotImplementedError
def inspect_cpu_util(self, instance, duration=None):
"""Inspect the CPU Utilization (%) for an instance.
:param instance: the target instance
:param duration: the last 'n' seconds, over which the value should be
inspected
:return: the percentage of CPU utilization
"""
raise ceilometer.NotImplementedError
def inspect_vnics(self, instance):
"""Inspect the vNIC statistics for an instance.
:param instance: the target instance
:return: for each vNIC, the number of bytes & packets
received and transmitted
"""
raise ceilometer.NotImplementedError
def inspect_vnic_rates(self, instance, duration=None):
"""Inspect the vNIC rate statistics for an instance.
:param instance: the target instance
:param duration: the last 'n' seconds, over which the value should be
inspected
:return: for each vNIC, the rate of bytes & packets
received and transmitted
"""
raise ceilometer.NotImplementedError
def inspect_disks(self, instance):
"""Inspect the disk statistics for an instance.
:param instance: the target instance
:return: for each disk, the number of bytes & operations
read and written, and the error count
"""
raise ceilometer.NotImplementedError
def inspect_memory_usage(self, instance, duration=None):
"""Inspect the memory usage statistics for an instance.
:param instance: the target instance
:param duration: the last 'n' seconds, over which the value should be
inspected
:return: the amount of memory used
"""
raise ceilometer.NotImplementedError
def inspect_disk_rates(self, instance, duration=None):
"""Inspect the disk statistics as rates for an instance.
:param instance: the target instance
:param duration: the last 'n' seconds, over which the value should be
inspected
:return: for each disk, the number of bytes & operations
read and written per second, with the error count
"""
raise ceilometer.NotImplementedError
def inspect_disk_latency(self, instance):
"""Inspect the disk statistics as rates for an instance.
:param instance: the target instance
:return: for each disk, the average disk latency
"""
raise ceilometer.NotImplementedError
def inspect_disk_iops(self, instance):
"""Inspect the disk statistics as rates for an instance.
:param instance: the target instance
:return: for each disk, the number of iops per second
"""
raise ceilometer.NotImplementedError
def inspect_disk_info(self, instance):
"""Inspect the disk information for an instance.
:param instance: the target instance
        :return: for each disk, capacity, allocation and usage
"""
raise ceilometer.NotImplementedError
def get_hypervisor_inspector():
try:
namespace = 'ceilometer.compute.virt'
mgr = driver.DriverManager(namespace,
cfg.CONF.hypervisor_inspector,
invoke_on_load=True)
return mgr.driver
except ImportError as e:
LOG.error(_("Unable to load the hypervisor inspector: %s") % e)
return Inspector()
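
# New hypervisor support plugs into the 'ceilometer.compute.virt' stevedore
# namespace used above: subclass Inspector, implement what the hypervisor can
# report, and leave the rest raising NotImplementedError so pollsters skip
# them. A hedged, self-contained sketch (NullInspector and its values are
# hypothetical; in-tree it would subclass Inspector and be registered via a
# setup.cfg entry point):
import collections

CPUStats = collections.namedtuple('CPUStats', ['number', 'time'])


class NullInspector(object):
    def inspect_cpus(self, instance):
        return CPUStats(number=1, time=0)


assert NullInspector().inspect_cpus('instance-1').number == 1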


@@ -1,219 +0,0 @@
#
# Copyright 2012 Red Hat, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of Inspector abstraction for libvirt."""
from lxml import etree
from oslo_config import cfg
from oslo_log import log as logging
from oslo_utils import units
import six
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _
libvirt = None
LOG = logging.getLogger(__name__)
OPTS = [
cfg.StrOpt('libvirt_type',
default='kvm',
choices=['kvm', 'lxc', 'qemu', 'uml', 'xen'],
help='Libvirt domain type.'),
cfg.StrOpt('libvirt_uri',
default='',
help='Override the default libvirt URI '
'(which is dependent on libvirt_type).'),
]
CONF = cfg.CONF
CONF.register_opts(OPTS)
def retry_on_disconnect(function):
def decorator(self, *args, **kwargs):
try:
return function(self, *args, **kwargs)
except libvirt.libvirtError as e:
if (e.get_error_code() == libvirt.VIR_ERR_SYSTEM_ERROR and
e.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
LOG.debug(_('Connection to libvirt broken'))
self.connection = None
return function(self, *args, **kwargs)
else:
raise
return decorator
class LibvirtInspector(virt_inspector.Inspector):
per_type_uris = dict(uml='uml:///system', xen='xen:///', lxc='lxc:///')
def __init__(self):
self.uri = self._get_uri()
self.connection = None
def _get_uri(self):
return CONF.libvirt_uri or self.per_type_uris.get(CONF.libvirt_type,
'qemu:///system')
def _get_connection(self):
if not self.connection:
global libvirt
if libvirt is None:
libvirt = __import__('libvirt')
LOG.debug(_('Connecting to libvirt: %s'), self.uri)
self.connection = libvirt.openReadOnly(self.uri)
return self.connection
@retry_on_disconnect
def _lookup_by_uuid(self, instance):
instance_name = util.instance_name(instance)
try:
return self._get_connection().lookupByUUIDString(instance.id)
except Exception as ex:
if not libvirt or not isinstance(ex, libvirt.libvirtError):
raise virt_inspector.InspectorException(six.text_type(ex))
error_code = ex.get_error_code()
if (error_code == libvirt.VIR_ERR_SYSTEM_ERROR and
ex.get_error_domain() in (libvirt.VIR_FROM_REMOTE,
libvirt.VIR_FROM_RPC)):
raise
msg = _("Error from libvirt while looking up instance "
"<name=%(name)s, id=%(id)s>: "
"[Error Code %(error_code)s] "
"%(ex)s") % {'name': instance_name,
'id': instance.id,
'error_code': error_code,
'ex': ex}
raise virt_inspector.InstanceNotFoundException(msg)
def inspect_cpus(self, instance):
domain = self._lookup_by_uuid(instance)
dom_info = domain.info()
return virt_inspector.CPUStats(number=dom_info[3], time=dom_info[4])
def _get_domain_not_shut_off_or_raise(self, instance):
instance_name = util.instance_name(instance)
domain = self._lookup_by_uuid(instance)
state = domain.info()[0]
if state == libvirt.VIR_DOMAIN_SHUTOFF:
msg = _('Failed to inspect data of instance '
'<name=%(name)s, id=%(id)s>, '
'domain state is SHUTOFF.') % {
'name': instance_name, 'id': instance.id}
raise virt_inspector.InstanceShutOffException(msg)
return domain
def inspect_vnics(self, instance):
domain = self._get_domain_not_shut_off_or_raise(instance)
tree = etree.fromstring(domain.XMLDesc(0))
for iface in tree.findall('devices/interface'):
target = iface.find('target')
if target is not None:
name = target.get('dev')
else:
continue
mac = iface.find('mac')
if mac is not None:
mac_address = mac.get('address')
else:
continue
fref = iface.find('filterref')
if fref is not None:
fref = fref.get('filter')
params = dict((p.get('name').lower(), p.get('value'))
for p in iface.findall('filterref/parameter'))
interface = virt_inspector.Interface(name=name, mac=mac_address,
fref=fref, parameters=params)
dom_stats = domain.interfaceStats(name)
stats = virt_inspector.InterfaceStats(rx_bytes=dom_stats[0],
rx_packets=dom_stats[1],
tx_bytes=dom_stats[4],
tx_packets=dom_stats[5])
yield (interface, stats)
def inspect_disks(self, instance):
domain = self._get_domain_not_shut_off_or_raise(instance)
tree = etree.fromstring(domain.XMLDesc(0))
for device in filter(
bool,
[target.get("dev")
for target in tree.findall('devices/disk/target')]):
disk = virt_inspector.Disk(device=device)
block_stats = domain.blockStats(device)
stats = virt_inspector.DiskStats(read_requests=block_stats[0],
read_bytes=block_stats[1],
write_requests=block_stats[2],
write_bytes=block_stats[3],
errors=block_stats[4])
yield (disk, stats)
def inspect_memory_usage(self, instance, duration=None):
instance_name = util.instance_name(instance)
domain = self._get_domain_not_shut_off_or_raise(instance)
try:
memory_stats = domain.memoryStats()
if (memory_stats and
memory_stats.get('available') and
memory_stats.get('unused')):
memory_used = (memory_stats.get('available') -
memory_stats.get('unused'))
# Stat provided from libvirt is in KB, converting it to MB.
memory_used = memory_used / units.Ki
return virt_inspector.MemoryUsageStats(usage=memory_used)
else:
msg = _('Failed to inspect memory usage of instance '
'<name=%(name)s, id=%(id)s>, '
'can not get info from libvirt.') % {
'name': instance_name, 'id': instance.id}
raise virt_inspector.NoDataException(msg)
        # memoryStats might raise an exception if the method is not supported
        # by the underlying hypervisor being used by libvirt.
except libvirt.libvirtError as e:
msg = _('Failed to inspect memory usage of %(instance_uuid)s, '
'can not get info from libvirt: %(error)s') % {
'instance_uuid': instance.id, 'error': e}
raise virt_inspector.NoDataException(msg)
def inspect_disk_info(self, instance):
domain = self._get_domain_not_shut_off_or_raise(instance)
tree = etree.fromstring(domain.XMLDesc(0))
for device in filter(
bool,
[target.get("dev")
for target in tree.findall('devices/disk/target')]):
disk = virt_inspector.Disk(device=device)
block_info = domain.blockInfo(device)
info = virt_inspector.DiskInfo(capacity=block_info[0],
allocation=block_info[1],
physical=block_info[2])
yield (disk, info)
def inspect_memory_resident(self, instance, duration=None):
domain = self._get_domain_not_shut_off_or_raise(instance)
memory = domain.memoryStats()['rss'] / units.Ki
return virt_inspector.MemoryResidentStats(resident=memory)
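
# inspect_memory_usage above derives used memory from libvirt's memoryStats
# and converts KiB to MiB (units.Ki == 1024). Worked through with
# hypothetical stat values:
memory_stats = {'available': 2097152, 'unused': 1048576}  # KiB
memory_used_mb = (memory_stats['available'] -
                  memory_stats['unused']) / 1024.0
assert memory_used_mb == 1024.0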


@@ -1,199 +0,0 @@
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of Inspector abstraction for VMware vSphere"""
from oslo_config import cfg
from oslo_utils import units
from oslo_vmware import api
import six
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.compute.virt.vmware import vsphere_operations
from ceilometer.i18n import _
opt_group = cfg.OptGroup(name='vmware',
title='Options for VMware')
OPTS = [
cfg.StrOpt('host_ip',
default='',
help='IP address of the VMware Vsphere host.'),
cfg.IntOpt('host_port',
default=443,
help='Port of the VMware Vsphere host.'),
cfg.StrOpt('host_username',
default='',
help='Username of VMware Vsphere.'),
cfg.StrOpt('host_password',
default='',
help='Password of VMware Vsphere.',
secret=True),
cfg.StrOpt('ca_file',
help='CA bundle file to use in verifying the vCenter server '
'certificate.'),
cfg.BoolOpt('insecure',
default=False,
help='If true, the vCenter server certificate is not '
'verified. If false, then the default CA truststore is '
'used for verification. This option is ignored if '
'"ca_file" is set.'),
cfg.IntOpt('api_retry_count',
default=10,
help='Number of times a VMware Vsphere API may be retried.'),
cfg.FloatOpt('task_poll_interval',
default=0.5,
help='Sleep time in seconds for polling an ongoing async '
'task.'),
cfg.StrOpt('wsdl_location',
               help='Optional vim service WSDL location, '
                    'e.g http://<server>/vimService.wsdl. '
                    'Overrides the default location; only needed to '
                    'work around bugs.'),
]
cfg.CONF.register_group(opt_group)
cfg.CONF.register_opts(OPTS, group=opt_group)
VC_AVERAGE_MEMORY_CONSUMED_CNTR = 'mem:consumed:average'
VC_AVERAGE_CPU_CONSUMED_CNTR = 'cpu:usage:average'
VC_NETWORK_RX_COUNTER = 'net:received:average'
VC_NETWORK_TX_COUNTER = 'net:transmitted:average'
VC_DISK_READ_RATE_CNTR = "disk:read:average"
VC_DISK_READ_REQUESTS_RATE_CNTR = "disk:numberReadAveraged:average"
VC_DISK_WRITE_RATE_CNTR = "disk:write:average"
VC_DISK_WRITE_REQUESTS_RATE_CNTR = "disk:numberWriteAveraged:average"
def get_api_session():
api_session = api.VMwareAPISession(
cfg.CONF.vmware.host_ip,
cfg.CONF.vmware.host_username,
cfg.CONF.vmware.host_password,
cfg.CONF.vmware.api_retry_count,
cfg.CONF.vmware.task_poll_interval,
wsdl_loc=cfg.CONF.vmware.wsdl_location,
port=cfg.CONF.vmware.host_port,
cacert=cfg.CONF.vmware.ca_file,
insecure=cfg.CONF.vmware.insecure)
return api_session
class VsphereInspector(virt_inspector.Inspector):
def __init__(self):
super(VsphereInspector, self).__init__()
self._ops = vsphere_operations.VsphereOperations(
get_api_session(), 1000)
def inspect_cpu_util(self, instance, duration=None):
vm_moid = self._ops.get_vm_moid(instance.id)
if vm_moid is None:
raise virt_inspector.InstanceNotFoundException(
_('VM %s not found in VMware Vsphere') % instance.id)
cpu_util_counter_id = self._ops.get_perf_counter_id(
VC_AVERAGE_CPU_CONSUMED_CNTR)
cpu_util = self._ops.query_vm_aggregate_stats(
vm_moid, cpu_util_counter_id, duration)
        # For this counter vSphere returns values scaled up by 100, since the
        # corresponding API can only return longs, not decimals. E.g. if the
        # utilization is 12.34%, the value returned is 1234, hence the
        # division by 100.
cpu_util = cpu_util / 100
return virt_inspector.CPUUtilStats(util=cpu_util)
def inspect_vnic_rates(self, instance, duration=None):
vm_moid = self._ops.get_vm_moid(instance.id)
if not vm_moid:
raise virt_inspector.InstanceNotFoundException(
_('VM %s not found in VMware Vsphere') % instance.id)
vnic_stats = {}
vnic_ids = set()
for net_counter in (VC_NETWORK_RX_COUNTER, VC_NETWORK_TX_COUNTER):
net_counter_id = self._ops.get_perf_counter_id(net_counter)
vnic_id_to_stats_map = self._ops.query_vm_device_stats(
vm_moid, net_counter_id, duration)
vnic_stats[net_counter] = vnic_id_to_stats_map
vnic_ids.update(six.iterkeys(vnic_id_to_stats_map))
# Stats provided from vSphere are in KB/s, converting it to B/s.
for vnic_id in vnic_ids:
rx_bytes_rate = (vnic_stats[VC_NETWORK_RX_COUNTER]
.get(vnic_id, 0) * units.Ki)
tx_bytes_rate = (vnic_stats[VC_NETWORK_TX_COUNTER]
.get(vnic_id, 0) * units.Ki)
stats = virt_inspector.InterfaceRateStats(rx_bytes_rate,
tx_bytes_rate)
interface = virt_inspector.Interface(
name=vnic_id,
mac=None,
fref=None,
parameters=None)
yield (interface, stats)
def inspect_memory_usage(self, instance, duration=None):
vm_moid = self._ops.get_vm_moid(instance.id)
if vm_moid is None:
raise virt_inspector.InstanceNotFoundException(
_('VM %s not found in VMware Vsphere') % instance.id)
mem_counter_id = self._ops.get_perf_counter_id(
VC_AVERAGE_MEMORY_CONSUMED_CNTR)
memory = self._ops.query_vm_aggregate_stats(
vm_moid, mem_counter_id, duration)
# Stat provided from vSphere is in KB, converting it to MB.
memory = memory / units.Ki
return virt_inspector.MemoryUsageStats(usage=memory)
def inspect_disk_rates(self, instance, duration=None):
vm_moid = self._ops.get_vm_moid(instance.id)
if not vm_moid:
raise virt_inspector.InstanceNotFoundException(
_('VM %s not found in VMware Vsphere') % instance.id)
disk_stats = {}
disk_ids = set()
disk_counters = [
VC_DISK_READ_RATE_CNTR,
VC_DISK_READ_REQUESTS_RATE_CNTR,
VC_DISK_WRITE_RATE_CNTR,
VC_DISK_WRITE_REQUESTS_RATE_CNTR
]
for disk_counter in disk_counters:
disk_counter_id = self._ops.get_perf_counter_id(disk_counter)
disk_id_to_stat_map = self._ops.query_vm_device_stats(
vm_moid, disk_counter_id, duration)
disk_stats[disk_counter] = disk_id_to_stat_map
disk_ids.update(six.iterkeys(disk_id_to_stat_map))
for disk_id in disk_ids:
def stat_val(counter_name):
return disk_stats[counter_name].get(disk_id, 0)
disk = virt_inspector.Disk(device=disk_id)
# Stats provided from vSphere are in KB/s, converting it to B/s.
disk_rate_info = virt_inspector.DiskRateStats(
read_bytes_rate=stat_val(VC_DISK_READ_RATE_CNTR) * units.Ki,
read_requests_rate=stat_val(VC_DISK_READ_REQUESTS_RATE_CNTR),
write_bytes_rate=stat_val(VC_DISK_WRITE_RATE_CNTR) * units.Ki,
write_requests_rate=stat_val(VC_DISK_WRITE_REQUESTS_RATE_CNTR)
)
            yield (disk, disk_rate_info)
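
# Two unit conventions drive the conversions above: cpu:usage:average comes
# back scaled up by 100 (1234 means 12.34%), and the network and disk rate
# counters come back in KB/s. Worked through with hypothetical raw values:
assert 1234 / 100.0 == 12.34  # percent, after the /100 rescale
assert 8 * 1024 == 8192       # 8 KB/s -> 8192 B/s via units.Ki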


@@ -1,230 +0,0 @@
# Copyright (c) 2014 VMware, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_vmware import vim_util
PERF_MANAGER_TYPE = "PerformanceManager"
PERF_COUNTER_PROPERTY = "perfCounter"
VM_INSTANCE_ID_PROPERTY = 'config.extraConfig["nvp.vm-uuid"].value'
# ESXi Servers sample performance data every 20 seconds. 20-second interval
# data is called instance data or real-time data. To retrieve instance data,
# we need to specify a value of 20 seconds for the "PerfQuerySpec.intervalId"
# property. In that case the "QueryPerf" method operates as a raw data feed
# that bypasses the vCenter database and instead retrieves performance data
# from an ESXi host.
# The following value is time interval for real-time performance stats
# in seconds and it is not configurable.
VC_REAL_TIME_SAMPLING_INTERVAL = 20
class VsphereOperations(object):
"""Class to invoke vSphere APIs calls.
vSphere APIs calls are required by various pollsters, collecting data from
VMware infrastructure.
"""
def __init__(self, api_session, max_objects):
self._api_session = api_session
self._max_objects = max_objects
# Mapping between "VM's Nova instance Id" -> "VM's MOID"
# In case a VM is deployed by Nova, then its name is instance ID.
# So this map essentially has VM names as keys.
self._vm_moid_lookup_map = {}
# Mapping from full name -> ID, for VC Performance counters
self._perf_counter_id_lookup_map = None
def _init_vm_moid_lookup_map(self):
session = self._api_session
result = session.invoke_api(vim_util, "get_objects", session.vim,
"VirtualMachine", self._max_objects,
[VM_INSTANCE_ID_PROPERTY],
False)
while result:
for vm_object in result.objects:
vm_moid = vm_object.obj.value
# propSet will be set only if the server provides value
if hasattr(vm_object, 'propSet') and vm_object.propSet:
vm_instance_id = vm_object.propSet[0].val
if vm_instance_id:
self._vm_moid_lookup_map[vm_instance_id] = vm_moid
result = session.invoke_api(vim_util, "continue_retrieval",
session.vim, result)
def get_vm_moid(self, vm_instance_id):
"""Method returns VC MOID of the VM by its NOVA instance ID."""
if vm_instance_id not in self._vm_moid_lookup_map:
self._init_vm_moid_lookup_map()
return self._vm_moid_lookup_map.get(vm_instance_id, None)
def _init_perf_counter_id_lookup_map(self):
# Query details of all the performance counters from VC
session = self._api_session
client_factory = session.vim.client.factory
perf_manager = session.vim.service_content.perfManager
prop_spec = vim_util.build_property_spec(
client_factory, PERF_MANAGER_TYPE, [PERF_COUNTER_PROPERTY])
obj_spec = vim_util.build_object_spec(
client_factory, perf_manager, None)
filter_spec = vim_util.build_property_filter_spec(
client_factory, [prop_spec], [obj_spec])
options = client_factory.create('ns0:RetrieveOptions')
options.maxObjects = 1
prop_collector = session.vim.service_content.propertyCollector
result = session.invoke_api(session.vim, "RetrievePropertiesEx",
prop_collector, specSet=[filter_spec],
options=options)
perf_counter_infos = result.objects[0].propSet[0].val.PerfCounterInfo
# Extract the counter Id for each counter and populate the map
self._perf_counter_id_lookup_map = {}
for perf_counter_info in perf_counter_infos:
counter_group = perf_counter_info.groupInfo.key
counter_name = perf_counter_info.nameInfo.key
counter_rollup_type = perf_counter_info.rollupType
counter_id = perf_counter_info.key
counter_full_name = (counter_group + ":" + counter_name + ":" +
counter_rollup_type)
self._perf_counter_id_lookup_map[counter_full_name] = counter_id
def get_perf_counter_id(self, counter_full_name):
"""Method returns the ID of VC performance counter by its full name.
A VC performance counter is uniquely identified by the
tuple {'Group Name', 'Counter Name', 'Rollup Type'}.
It will have an id - counter ID (changes from one VC to another),
which is required to query performance stats from that VC.
This method returns the ID for a counter,
assuming 'CounterFullName' => 'Group Name:CounterName:RollupType'.
"""
if not self._perf_counter_id_lookup_map:
self._init_perf_counter_id_lookup_map()
return self._perf_counter_id_lookup_map[counter_full_name]
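    # Usage sketch for the lookup above (illustrative only; not part of the
    # original module). "cpu:usage:average" is one commonly cited vSphere
    # counter full name following the 'Group:Name:RollupType' convention:
    #
    #     ops = VsphereOperations(api_session, max_objects=100)
    #     counter_id = ops.get_perf_counter_id("cpu:usage:average")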
# TODO(akhils@vmware.com) Move this method to common library
# when it gets checked-in
def query_vm_property(self, vm_moid, property_name):
"""Method returns the value of specified property for a VM.
:param vm_moid: moid of the VM whose property is to be queried
:param property_name: path of the property
"""
vm_mobj = vim_util.get_moref(vm_moid, "VirtualMachine")
session = self._api_session
return session.invoke_api(vim_util, "get_object_property",
session.vim, vm_mobj, property_name)
def query_vm_aggregate_stats(self, vm_moid, counter_id, duration):
"""Method queries the aggregated real-time stat value for a VM.
This method should be used for aggregate counters.
:param vm_moid: moid of the VM
:param counter_id: id of the perf counter in VC
:param duration: in seconds from current time,
over which the stat value was applicable
:return: the aggregated stats value for the counter
"""
# For aggregate counters, device_name should be ""
stats = self._query_vm_perf_stats(vm_moid, counter_id, "", duration)
# Performance manager provides the aggregated stats value
# with device name -> None
return stats.get(None, 0)
def query_vm_device_stats(self, vm_moid, counter_id, duration):
"""Method queries the real-time stat values for a VM, for all devices.
This method should be used for device(non-aggregate) counters.
:param vm_moid: moid of the VM
:param counter_id: id of the perf counter in VC
:param duration: in seconds from current time,
over which the stat value was applicable
:return: a map containing the stat values keyed by the device ID/name
"""
# For device counters, device_name should be "*" to get stat values
# for all devices.
stats = self._query_vm_perf_stats(vm_moid, counter_id, "*", duration)
# For some device counters, in addition to the per device value
# the Performance manager also returns the aggregated value.
        # For consistency, delete the aggregated value if present.
stats.pop(None, None)
return stats
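    # Combined usage sketch for the two query methods above (hypothetical
    # MOID and counter IDs, reusing the 'ops' sketch earlier; not part of
    # the original module):
    #
    #     cpu = ops.query_vm_aggregate_stats('vm-123', cpu_counter_id, 600)
    #     disks = ops.query_vm_device_stats('vm-123', disk_counter_id, 600)
    #     # cpu is a single float; disks maps device ID/name -> stat value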
def _query_vm_perf_stats(self, vm_moid, counter_id, device_name, duration):
"""Method queries the real-time stat values for a VM.
:param vm_moid: moid of the VM for which stats are needed
:param counter_id: id of the perf counter in VC
:param device_name: name of the device for which stats are to be
queried. For aggregate counters pass empty string ("").
For device counters pass "*", if stats are required over all
devices.
:param duration: in seconds from current time,
over which the stat value was applicable
:return: a map containing the stat values keyed by the device ID/name
"""
session = self._api_session
client_factory = session.vim.client.factory
# Construct the QuerySpec
metric_id = client_factory.create('ns0:PerfMetricId')
metric_id.counterId = counter_id
metric_id.instance = device_name
query_spec = client_factory.create('ns0:PerfQuerySpec')
query_spec.entity = vim_util.get_moref(vm_moid, "VirtualMachine")
query_spec.metricId = [metric_id]
query_spec.intervalId = VC_REAL_TIME_SAMPLING_INTERVAL
# We query all samples which are applicable over the specified duration
samples_cnt = (int(duration / VC_REAL_TIME_SAMPLING_INTERVAL)
if duration and
duration >= VC_REAL_TIME_SAMPLING_INTERVAL else 1)
query_spec.maxSample = samples_cnt
perf_manager = session.vim.service_content.perfManager
perf_stats = session.invoke_api(session.vim, 'QueryPerf', perf_manager,
querySpec=[query_spec])
stat_values = {}
if perf_stats:
entity_metric = perf_stats[0]
sample_infos = entity_metric.sampleInfo
if len(sample_infos) > 0:
for metric_series in entity_metric.value:
# Take the average of all samples to improve the accuracy
# of the stat value
stat_value = float(sum(metric_series.value)) / samples_cnt
device_id = metric_series.id.instance
stat_values[device_id] = stat_value
return stat_values

View File

@ -1,175 +0,0 @@
# Copyright 2014 Intel
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Implementation of Inspector abstraction for XenAPI."""
from eventlet import timeout
from oslo_config import cfg
from oslo_utils import units
try:
import XenAPI as api
except ImportError:
api = None
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.i18n import _
opt_group = cfg.OptGroup(name='xenapi',
title='Options for XenAPI')
OPTS = [
cfg.StrOpt('connection_url',
help='URL for connection to XenServer/Xen Cloud Platform.'),
cfg.StrOpt('connection_username',
default='root',
help='Username for connection to XenServer/Xen Cloud '
'Platform.'),
cfg.StrOpt('connection_password',
help='Password for connection to XenServer/Xen Cloud Platform.',
secret=True),
cfg.IntOpt('login_timeout',
default=10,
help='Timeout in seconds for XenAPI login.'),
]
CONF = cfg.CONF
CONF.register_group(opt_group)
CONF.register_opts(OPTS, group=opt_group)
class XenapiException(virt_inspector.InspectorException):
pass
def get_api_session():
if not api:
raise ImportError(_('XenAPI not installed'))
url = CONF.xenapi.connection_url
username = CONF.xenapi.connection_username
password = CONF.xenapi.connection_password
if not url or password is None:
        raise XenapiException(_('Must specify connection_url and '
                                'connection_password to use'))
exception = api.Failure(_("Unable to log in to XenAPI "
"(is the Dom0 disk full?)"))
try:
session = api.Session(url)
with timeout.Timeout(CONF.xenapi.login_timeout, exception):
session.login_with_password(username, password)
except api.Failure as e:
msg = _("Could not connect to XenAPI: %s") % e.details[0]
raise XenapiException(msg)
return session
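# Illustrative usage sketch (assumes the [xenapi] options above are set in
# the loaded configuration; not part of the original module):
#
#     session = get_api_session()
#     host_ref = session.xenapi.session.get_this_host(session.handle)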
class XenapiInspector(virt_inspector.Inspector):
def __init__(self):
super(XenapiInspector, self).__init__()
self.session = get_api_session()
def _get_host_ref(self):
"""Return the xenapi host on which nova-compute runs on."""
return self.session.xenapi.session.get_this_host(self.session.handle)
def _call_xenapi(self, method, *args):
return self.session.xenapi_request(method, args)
def _lookup_by_name(self, instance_name):
vm_refs = self._call_xenapi("VM.get_by_name_label", instance_name)
n = len(vm_refs)
if n == 0:
raise virt_inspector.InstanceNotFoundException(
_('VM %s not found in XenServer') % instance_name)
elif n > 1:
raise XenapiException(
                _('Multiple VMs named %s found in XenServer') % instance_name)
else:
return vm_refs[0]
def inspect_cpu_util(self, instance, duration=None):
instance_name = util.instance_name(instance)
vm_ref = self._lookup_by_name(instance_name)
metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref)
metrics_rec = self._call_xenapi("VM_metrics.get_record",
metrics_ref)
vcpus_number = metrics_rec['VCPUs_number']
vcpus_utils = metrics_rec['VCPUs_utilisation']
        if not vcpus_utils:
            msg = _("Could not get VM %s CPU Utilization") % instance_name
            raise XenapiException(msg)
        utils = 0.0
        for num in range(int(vcpus_number)):
            # Guard against missing per-vCPU entries to avoid a TypeError.
            utils += vcpus_utils.get(str(num), 0.0)
        utils = utils / int(vcpus_number) * 100
return virt_inspector.CPUUtilStats(util=utils)
def inspect_memory_usage(self, instance, duration=None):
instance_name = util.instance_name(instance)
vm_ref = self._lookup_by_name(instance_name)
metrics_ref = self._call_xenapi("VM.get_metrics", vm_ref)
metrics_rec = self._call_xenapi("VM_metrics.get_record",
metrics_ref)
# Stat provided from XenServer is in B, converting it to MB.
memory = int(metrics_rec['memory_actual']) / units.Mi
return virt_inspector.MemoryUsageStats(usage=memory)
def inspect_vnic_rates(self, instance, duration=None):
instance_name = util.instance_name(instance)
vm_ref = self._lookup_by_name(instance_name)
vif_refs = self._call_xenapi("VM.get_VIFs", vm_ref)
if vif_refs:
for vif_ref in vif_refs:
vif_rec = self._call_xenapi("VIF.get_record", vif_ref)
vif_metrics_ref = self._call_xenapi(
"VIF.get_metrics", vif_ref)
vif_metrics_rec = self._call_xenapi(
"VIF_metrics.get_record", vif_metrics_ref)
interface = virt_inspector.Interface(
name=vif_rec['uuid'],
mac=vif_rec['MAC'],
fref=None,
parameters=None)
rx_rate = float(vif_metrics_rec['io_read_kbs']) * units.Ki
tx_rate = float(vif_metrics_rec['io_write_kbs']) * units.Ki
stats = virt_inspector.InterfaceRateStats(rx_rate, tx_rate)
yield (interface, stats)
def inspect_disk_rates(self, instance, duration=None):
instance_name = util.instance_name(instance)
vm_ref = self._lookup_by_name(instance_name)
vbd_refs = self._call_xenapi("VM.get_VBDs", vm_ref)
if vbd_refs:
for vbd_ref in vbd_refs:
vbd_rec = self._call_xenapi("VBD.get_record", vbd_ref)
vbd_metrics_ref = self._call_xenapi("VBD.get_metrics",
vbd_ref)
vbd_metrics_rec = self._call_xenapi("VBD_metrics.get_record",
vbd_metrics_ref)
disk = virt_inspector.Disk(device=vbd_rec['device'])
# Stats provided from XenServer are in KB/s,
# converting it to B/s.
read_rate = float(vbd_metrics_rec['io_read_kbs']) * units.Ki
write_rate = float(vbd_metrics_rec['io_write_kbs']) * units.Ki
disk_rate_info = virt_inspector.DiskRateStats(
read_bytes_rate=read_rate,
read_requests_rate=0,
write_bytes_rate=write_rate,
write_requests_rate=0)
                yield (disk, disk_rate_info)
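# Illustrative usage sketch (not part of the original module; 'instance' is
# a hypothetical Nova instance object accepted by util.instance_name):
#
#     inspector = XenapiInspector()
#     for disk, rates in inspector.inspect_disk_rates(instance):
#         print(disk.device, rates.read_bytes_rate, rates.write_bytes_rate)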

View File

@ -1,71 +0,0 @@
# Copyright (c) 2014 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_config import cfg
import oslo_messaging
from ceilometer.agent import plugin_base
from ceilometer import sample
OPTS = [
cfg.StrOpt('sahara_control_exchange',
default='sahara',
help="Exchange name for Data Processing notifications."),
]
cfg.CONF.register_opts(OPTS)
SERVICE = 'sahara'
class DataProcessing(plugin_base.NotificationBase,
plugin_base.NonMetricNotificationBase):
resource_name = '%s.cluster' % SERVICE
@property
def event_types(self):
return [
'%s.create' % self.resource_name,
'%s.update' % self.resource_name,
'%s.delete' % self.resource_name,
]
@staticmethod
def get_targets(conf):
"""Return a sequence of oslo_messaging.Target
It is defining the exchange and topics to be connected for this plugin.
"""
return [oslo_messaging.Target(topic=topic,
exchange=conf.sahara_control_exchange)
for topic in conf.notification_topics]
def process_notification(self, message):
name = message['event_type'].replace(self.resource_name, 'cluster')
project_id = message['payload']['project_id']
user_id = message['_context_user_id']
yield sample.Sample.from_notification(
name=name,
type=sample.TYPE_DELTA,
unit='cluster',
volume=1,
resource_id=message['payload']['cluster_id'],
user_id=user_id,
project_id=project_id,
message=message)
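# Illustrative sketch of the mapping above (hypothetical message contents;
# not part of the original module): an incoming notification such as
#
#     {'event_type': 'sahara.cluster.create',
#      'payload': {'project_id': 'p1', 'cluster_id': 'c1'},
#      '_context_user_id': 'u1'}
#
# yields a delta sample named 'cluster.create' with volume 1 and
# resource_id 'c1'.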

View File

@ -1,69 +0,0 @@
#
# Copyright 2013 IBM
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
from oslo_config import cfg
from oslo_log import log
import six
from stevedore import named
from ceilometer.i18n import _
LOG = log.getLogger(__name__)
OPTS = [
cfg.MultiStrOpt('dispatcher',
deprecated_group="collector",
default=['database'],
help='Dispatcher to process data.'),
]
cfg.CONF.register_opts(OPTS)
DISPATCHER_NAMESPACE = 'ceilometer.dispatcher'
def load_dispatcher_manager():
LOG.debug(_('loading dispatchers from %s'),
DISPATCHER_NAMESPACE)
# set propagate_map_exceptions to True to enable stevedore
# to propagate exceptions.
dispatcher_manager = named.NamedExtensionManager(
namespace=DISPATCHER_NAMESPACE,
names=cfg.CONF.dispatcher,
invoke_on_load=True,
invoke_args=[cfg.CONF],
propagate_map_exceptions=True)
if not list(dispatcher_manager):
LOG.warning(_('Failed to load any dispatchers for %s'),
DISPATCHER_NAMESPACE)
return dispatcher_manager
@six.add_metaclass(abc.ABCMeta)
class Base(object):
def __init__(self, conf):
self.conf = conf
@abc.abstractmethod
def record_metering_data(self, data):
"""Recording metering data interface."""
@abc.abstractmethod
def record_events(self, events):
"""Recording events interface."""

View File

@ -1,125 +0,0 @@
#
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from oslo_utils import timeutils
from ceilometer import dispatcher
from ceilometer.event.storage import models
from ceilometer.i18n import _, _LE, _LW
from ceilometer.publisher import utils as publisher_utils
from ceilometer import storage
LOG = log.getLogger(__name__)
class DatabaseDispatcher(dispatcher.Base):
"""Dispatcher class for recording metering data into database.
The dispatcher class which records each meter into a database configured
in ceilometer configuration file.
To enable this dispatcher, the following section needs to be present in
ceilometer.conf file
[DEFAULT]
dispatcher = database
"""
def __init__(self, conf):
super(DatabaseDispatcher, self).__init__(conf)
self._meter_conn = self._get_db_conn('metering', True)
self._event_conn = self._get_db_conn('event', True)
def _get_db_conn(self, purpose, ignore_exception=False):
try:
return storage.get_connection_from_config(self.conf, purpose)
except Exception as err:
params = {"purpose": purpose, "err": err}
LOG.exception(_LE("Failed to connect to db, purpose %(purpose)s "
"re-try later: %(err)s") % params)
if not ignore_exception:
raise
@property
def meter_conn(self):
if not self._meter_conn:
self._meter_conn = self._get_db_conn('metering')
return self._meter_conn
@property
def event_conn(self):
if not self._event_conn:
self._event_conn = self._get_db_conn('event')
return self._event_conn
def record_metering_data(self, data):
        # We may have received only one counter on the wire
if not isinstance(data, list):
data = [data]
for meter in data:
LOG.debug(_(
'metering data %(counter_name)s '
'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s')
% ({'counter_name': meter['counter_name'],
'resource_id': meter['resource_id'],
'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
'counter_volume': meter['counter_volume']}))
if publisher_utils.verify_signature(
meter, self.conf.publisher.telemetry_secret):
try:
# Convert the timestamp to a datetime instance.
# Storage engines are responsible for converting
# that value to something they can store.
if meter.get('timestamp'):
ts = timeutils.parse_isotime(meter['timestamp'])
meter['timestamp'] = timeutils.normalize_time(ts)
self.meter_conn.record_metering_data(meter)
except Exception as err:
LOG.exception(_LE('Failed to record metering data: %s'),
err)
# raise the exception to propagate it up in the chain.
raise
else:
LOG.warning(_LW(
'message signature invalid, discarding message: %r'),
meter)
def record_events(self, events):
if not isinstance(events, list):
events = [events]
event_list = []
for ev in events:
try:
event_list.append(
models.Event(
message_id=ev['message_id'],
event_type=ev['event_type'],
generated=timeutils.normalize_time(
timeutils.parse_isotime(ev['generated'])),
traits=[models.Trait(
name, dtype,
models.Trait.convert_value(dtype, value))
for name, dtype, value in ev['traits']],
raw=ev.get('raw', {}))
)
except Exception:
LOG.exception(_LE("Error processing event and it will be "
"dropped: %s"), ev)
self.event_conn.record_events(event_list)
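# Illustrative sketch of the timestamp normalization used above (not part
# of the original module): ISO 8601 strings are parsed and converted to
# naive UTC datetimes before storage.
#
#     ts = timeutils.parse_isotime('2015-05-25T07:26:37+02:00')
#     timeutils.normalize_time(ts)
#     # -> datetime.datetime(2015, 5, 25, 5, 26, 37)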

View File

@ -1,83 +0,0 @@
#
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import logging.handlers
from oslo_config import cfg
from ceilometer import dispatcher
OPTS = [
cfg.StrOpt('file_path',
               help='Name and location of the file used to record '
                    'meters.'),
cfg.IntOpt('max_bytes',
default=0,
help='The max size of the file.'),
cfg.IntOpt('backup_count',
default=0,
help='The max number of the files to keep.'),
]
cfg.CONF.register_opts(OPTS, group="dispatcher_file")
class FileDispatcher(dispatcher.Base):
"""Dispatcher class for recording metering data to a file.
    This dispatcher logs each meter into a file configured in the
    ceilometer configuration file. An example configuration may look like
    the following:
    [dispatcher_file]
    file_path = /tmp/meters
    To enable this dispatcher, the following section needs to be present in
    the ceilometer.conf file:
[DEFAULT]
dispatcher = file
"""
def __init__(self, conf):
super(FileDispatcher, self).__init__(conf)
self.log = None
# if the directory and path are configured, then log to the file
if self.conf.dispatcher_file.file_path:
dispatcher_logger = logging.Logger('dispatcher.file')
dispatcher_logger.setLevel(logging.INFO)
# create rotating file handler which logs meters
rfh = logging.handlers.RotatingFileHandler(
self.conf.dispatcher_file.file_path,
maxBytes=self.conf.dispatcher_file.max_bytes,
backupCount=self.conf.dispatcher_file.backup_count,
encoding='utf8')
rfh.setLevel(logging.INFO)
            # We only want the meters saved to the file, not propagated
            # to the project root logger.
dispatcher_logger.propagate = False
dispatcher_logger.addHandler(rfh)
self.log = dispatcher_logger
def record_metering_data(self, data):
if self.log:
self.log.info(data)
def record_events(self, events):
if self.log:
self.log.info(events)
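# Illustrative usage sketch (assumes [dispatcher_file] file_path is set in
# the loaded configuration; not part of the original module):
#
#     dispatcher = FileDispatcher(cfg.CONF)
#     dispatcher.record_metering_data({'counter_name': 'cpu_util',
#                                      'counter_volume': 0.5})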

View File

@ -1,394 +0,0 @@
#
# Copyright 2014 eNovance
#
# Authors: Julien Danjou <julien@danjou.info>
# Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fnmatch
import itertools
import json
import operator
import os
import threading
from oslo_config import cfg
from oslo_log import log
import requests
import six
import stevedore.dispatch
import yaml
from ceilometer import dispatcher
from ceilometer.i18n import _
from ceilometer import keystone_client
LOG = log.getLogger(__name__)
dispatcher_opts = [
cfg.BoolOpt('filter_service_activity',
default=True,
help='Filter out samples generated by Gnocchi '
'service activity'),
cfg.StrOpt('filter_project',
default='gnocchi',
help='Gnocchi project used to filter out samples '
'generated by Gnocchi service activity'),
cfg.StrOpt('url',
default="http://localhost:8041",
help='URL to Gnocchi.'),
cfg.StrOpt('archive_policy',
default="low",
               help='The archive policy to use when the dispatcher '
                    'creates a new metric.'),
cfg.StrOpt('archive_policy_file',
default='gnocchi_archive_policy_map.yaml',
help=_('The Yaml file that defines per metric archive '
'policies.')),
]
cfg.CONF.register_opts(dispatcher_opts, group="dispatcher_gnocchi")
class UnexpectedWorkflowError(Exception):
pass
class NoSuchMetric(Exception):
pass
class MetricAlreadyExists(Exception):
pass
class NoSuchResource(Exception):
pass
class ResourceAlreadyExists(Exception):
pass
def log_and_ignore_unexpected_workflow_error(func):
def log_and_ignore(self, *args, **kwargs):
try:
func(self, *args, **kwargs)
        except requests.ConnectionError:
            with self._gnocchi_api_lock:
                self._gnocchi_api = None
            LOG.warning("Connection error, reconnecting...")
except UnexpectedWorkflowError as e:
LOG.error(six.text_type(e))
return log_and_ignore
class GnocchiDispatcher(dispatcher.Base):
def __init__(self, conf):
super(GnocchiDispatcher, self).__init__(conf)
self.conf = conf
self.filter_service_activity = (
conf.dispatcher_gnocchi.filter_service_activity)
self._ks_client = keystone_client.get_client()
self.gnocchi_url = conf.dispatcher_gnocchi.url
self.gnocchi_archive_policy_default = (
conf.dispatcher_gnocchi.archive_policy)
self.gnocchi_archive_policy_data = self._load_archive_policy(conf)
self.mgmr = stevedore.dispatch.DispatchExtensionManager(
'ceilometer.dispatcher.resource', lambda x: True,
invoke_on_load=True)
self._gnocchi_project_id = None
self._gnocchi_project_id_lock = threading.Lock()
self._gnocchi_api = None
self._gnocchi_api_lock = threading.Lock()
def _get_headers(self, content_type="application/json"):
return {
'Content-Type': content_type,
'X-Auth-Token': self._ks_client.auth_token,
}
def _load_archive_policy(self, conf):
policy_config_file = self._get_config_file(conf)
data = {}
if policy_config_file is not None:
with open(policy_config_file) as data_file:
try:
data = yaml.safe_load(data_file)
                except yaml.YAMLError:
                    # yaml.safe_load raises YAMLError, not ValueError, on
                    # malformed input.
                    data = {}
return data
def get_archive_policy(self, metric_name):
archive_policy = {}
if self.gnocchi_archive_policy_data is not None:
policy_match = self._match_metric(metric_name)
archive_policy['archive_policy_name'] = (
policy_match or self.gnocchi_archive_policy_default)
else:
LOG.debug(_("No archive policy file found!"
" Using default config."))
archive_policy['archive_policy_name'] = (
self.gnocchi_archive_policy_default)
return archive_policy
@staticmethod
def _get_config_file(conf):
config_file = conf.dispatcher_gnocchi.archive_policy_file
if not os.path.exists(config_file):
config_file = cfg.CONF.find_file(config_file)
return config_file
def _match_metric(self, metric_name):
for metric, policy in self.gnocchi_archive_policy_data.items():
# Support wild cards such as disk.*
if fnmatch.fnmatch(metric_name, metric):
return policy
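    # Illustrative sketch of the wildcard matching above (hypothetical
    # mapping; not part of the original module): with an archive policy
    # file containing {'disk.*': 'low'}, _match_metric('disk.root.size')
    # returns 'low' because fnmatch.fnmatch('disk.root.size', 'disk.*')
    # is True.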
@property
def gnocchi_project_id(self):
if self._gnocchi_project_id is not None:
return self._gnocchi_project_id
with self._gnocchi_project_id_lock:
if self._gnocchi_project_id is None:
try:
project = self._ks_client.tenants.find(
name=self.conf.dispatcher_gnocchi.filter_project)
except Exception:
                    LOG.exception('Failed to retrieve the project of the '
                                  'Gnocchi service')
raise
self._gnocchi_project_id = project.id
LOG.debug("gnocchi project found: %s" %
self.gnocchi_project_id)
return self._gnocchi_project_id
@property
def gnocchi_api(self):
"""return a working requests session object"""
if self._gnocchi_api is not None:
return self._gnocchi_api
with self._gnocchi_api_lock:
if self._gnocchi_api is None:
self._gnocchi_api = requests.session()
# NOTE(sileht): wait when the pool is empty
# instead of raising errors.
adapter = requests.adapters.HTTPAdapter(pool_block=True)
self._gnocchi_api.mount("http://", adapter)
self._gnocchi_api.mount("https://", adapter)
return self._gnocchi_api
def _is_gnocchi_activity(self, sample):
return (self.filter_service_activity and (
            # avoid anything from the project used by gnocchi
sample['project_id'] == self.gnocchi_project_id or
# avoid anything in the swift account used by gnocchi
(sample['resource_id'] == self.gnocchi_project_id and
sample['counter_name'] in
self.mgmr['swift_account'].obj.get_metrics_names())
))
def record_metering_data(self, data):
# NOTE(sileht): skip sample generated by gnocchi itself
data = [s for s in data if not self._is_gnocchi_activity(s)]
        # FIXME(sileht): This method batches the processing of samples
        # grouped by resource_id and metric_name, but this is not efficient
        # yet, because the data received here does not often contain many
        # different kinds of samples. So perhaps the next step will be to
        # pool the data received from the message bus.
resource_grouped_samples = itertools.groupby(
data, key=operator.itemgetter('resource_id'))
for resource_id, samples_of_resource in resource_grouped_samples:
resource_need_to_be_updated = True
metric_grouped_samples = itertools.groupby(
list(samples_of_resource),
key=operator.itemgetter('counter_name'))
for metric_name, samples in metric_grouped_samples:
for ext in self.mgmr:
if metric_name in ext.obj.get_metrics_names():
self._process_samples(
ext, resource_id, metric_name, list(samples),
resource_need_to_be_updated)
                # FIXME(sileht): Is it reasonable to skip the resource
                # update here? Can different kinds of counter_name have
                # different metadata sets?
                # (i.e. one has only flavor_id, and another has only
                # image_ref?)
                #
                # resource_need_to_be_updated = False
@log_and_ignore_unexpected_workflow_error
def _process_samples(self, ext, resource_id, metric_name, samples,
resource_need_to_be_updated):
resource_type = ext.name
measure_attributes = [{'timestamp': sample['timestamp'],
'value': sample['counter_volume']}
for sample in samples]
try:
self._post_measure(resource_type, resource_id, metric_name,
measure_attributes)
except NoSuchMetric:
            # NOTE(sileht): we try to create the resource first, because it
            # is more likely that the resource does not exist than that the
            # metric is missing; this should reduce the number of resource
            # API calls.
resource_attributes = self._get_resource_attributes(
ext, resource_id, metric_name, samples)
try:
self._create_resource(resource_type, resource_id,
resource_attributes)
except ResourceAlreadyExists:
try:
self._create_metric(resource_type, resource_id,
metric_name)
except MetricAlreadyExists:
                    # NOTE(sileht): Just ignore it; the metric has been
                    # created in the meantime.
pass
else:
                # No need to update it; we just created it
                # with everything we need.
resource_need_to_be_updated = False
            # NOTE(sileht): we retry posting the measure, but if it fails we
            # don't catch the exception here; it is just logged (by the
            # decorator) and we continue processing other samples.
self._post_measure(resource_type, resource_id, metric_name,
measure_attributes)
if resource_need_to_be_updated:
resource_attributes = self._get_resource_attributes(
ext, resource_id, metric_name, samples, for_update=True)
self._update_resource(resource_type, resource_id,
resource_attributes)
def _get_resource_attributes(self, ext, resource_id, metric_name, samples,
for_update=False):
        # FIXME(sileht): Should we merge the attributes of all samples,
        # or is keeping only the last one sufficient?
attributes = ext.obj.get_resource_extra_attributes(
samples[-1])
if not for_update:
attributes["id"] = resource_id
attributes["user_id"] = samples[-1]['user_id']
attributes["project_id"] = samples[-1]['project_id']
attributes["metrics"] = dict(
(metric_name, self.get_archive_policy(metric_name))
for metric_name in ext.obj.get_metrics_names()
)
return attributes
def _post_measure(self, resource_type, resource_id, metric_name,
measure_attributes):
r = self.gnocchi_api.post("%s/v1/resource/%s/%s/metric/%s/measures"
% (self.gnocchi_url, resource_type,
resource_id, metric_name),
headers=self._get_headers(),
data=json.dumps(measure_attributes))
if r.status_code == 404:
LOG.debug(_("The metric %(metric_name)s of "
"resource %(resource_id)s doesn't exists: "
"%(status_code)d"),
{'metric_name': metric_name,
'resource_id': resource_id,
'status_code': r.status_code})
raise NoSuchMetric
elif int(r.status_code / 100) != 2:
raise UnexpectedWorkflowError(
_("Fail to post measure on metric %(metric_name)s of "
"resource %(resource_id)s with status: "
"%(status_code)d: %(msg)s") %
{'metric_name': metric_name,
'resource_id': resource_id,
'status_code': r.status_code,
'msg': r.text})
else:
LOG.debug("Measure posted on metric %s of resource %s",
metric_name, resource_id)
def _create_resource(self, resource_type, resource_id,
resource_attributes):
r = self.gnocchi_api.post("%s/v1/resource/%s"
% (self.gnocchi_url, resource_type),
headers=self._get_headers(),
data=json.dumps(resource_attributes))
if r.status_code == 409:
LOG.debug("Resource %s already exists", resource_id)
raise ResourceAlreadyExists
elif int(r.status_code / 100) != 2:
raise UnexpectedWorkflowError(
_("Resource %(resource_id)s creation failed with "
"status: %(status_code)d: %(msg)s") %
{'resource_id': resource_id,
'status_code': r.status_code,
'msg': r.text})
else:
LOG.debug("Resource %s created", resource_id)
def _update_resource(self, resource_type, resource_id,
resource_attributes):
r = self.gnocchi_api.patch(
"%s/v1/resource/%s/%s"
% (self.gnocchi_url, resource_type, resource_id),
headers=self._get_headers(),
data=json.dumps(resource_attributes))
if int(r.status_code / 100) != 2:
raise UnexpectedWorkflowError(
_("Resource %(resource_id)s update failed with "
"status: %(status_code)d: %(msg)s") %
{'resource_id': resource_id,
'status_code': r.status_code,
'msg': r.text})
else:
LOG.debug("Resource %s updated", resource_id)
def _create_metric(self, resource_type, resource_id, metric_name):
params = {metric_name: self.get_archive_policy(metric_name)}
r = self.gnocchi_api.post("%s/v1/resource/%s/%s/metric"
% (self.gnocchi_url, resource_type,
resource_id),
headers=self._get_headers(),
data=json.dumps(params))
if r.status_code == 409:
LOG.debug("Metric %s of resource %s already exists",
metric_name, resource_id)
raise MetricAlreadyExists
elif int(r.status_code / 100) != 2:
raise UnexpectedWorkflowError(
_("Fail to create metric %(metric_name)s of "
"resource %(resource_id)s with status: "
"%(status_code)d: %(msg)s") %
{'metric_name': metric_name,
'resource_id': resource_id,
'status_code': r.status_code,
'msg': r.text})
else:
LOG.debug("Metric %s of resource %s created",
metric_name, resource_id)
@staticmethod
def record_events(events):
raise NotImplementedError

View File

@ -1,138 +0,0 @@
# Copyright 2013 IBM Corp
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
from oslo_config import cfg
from oslo_log import log
import requests
from ceilometer import dispatcher
from ceilometer.i18n import _, _LE
from ceilometer.publisher import utils as publisher_utils
LOG = log.getLogger(__name__)
http_dispatcher_opts = [
cfg.StrOpt('target',
default='',
help='The target where the http request will be sent. '
'If this is not set, no data will be posted. For '
'example: target = http://hostname:1234/path'),
cfg.StrOpt('event_target',
               help='The target for event data where the http request '
                    'will be sent. If this is not set, it will default '
                    'to the same as the sample target.'),
cfg.BoolOpt('cadf_only',
default=False,
                help='The flag that indicates if only CADF messages should '
                     'be posted. If false, all meters will be posted.'),
cfg.IntOpt('timeout',
default=5,
help='The max time in seconds to wait for a request to '
'timeout.'),
]
cfg.CONF.register_opts(http_dispatcher_opts, group="dispatcher_http")
class HttpDispatcher(dispatcher.Base):
"""Dispatcher class for posting metering data into a http target.
To enable this dispatcher, the following option needs to be present in
ceilometer.conf file::
[DEFAULT]
dispatcher = http
Dispatcher specific options can be added as follows::
[dispatcher_http]
target = www.example.com
event_target = www.example.com
cadf_only = true
timeout = 2
"""
def __init__(self, conf):
super(HttpDispatcher, self).__init__(conf)
self.headers = {'Content-type': 'application/json'}
self.timeout = self.conf.dispatcher_http.timeout
self.target = self.conf.dispatcher_http.target
self.event_target = (self.conf.dispatcher_http.event_target or
self.target)
self.cadf_only = self.conf.dispatcher_http.cadf_only
def record_metering_data(self, data):
if self.target == '':
# if the target was not set, do not do anything
LOG.error(_('Dispatcher target was not set, no meter will '
'be posted. Set the target in the ceilometer.conf '
'file'))
return
        # We may have received only one counter on the wire
if not isinstance(data, list):
data = [data]
for meter in data:
LOG.debug(_(
'metering data %(counter_name)s '
'for %(resource_id)s @ %(timestamp)s: %(counter_volume)s')
% ({'counter_name': meter['counter_name'],
'resource_id': meter['resource_id'],
'timestamp': meter.get('timestamp', 'NO TIMESTAMP'),
'counter_volume': meter['counter_volume']}))
if publisher_utils.verify_signature(
meter, self.conf.publisher.telemetry_secret):
try:
if self.cadf_only:
                        # Only CADF messages are wanted.
req_data = meter.get('resource_metadata',
{}).get('request')
if req_data and 'CADF_EVENT' in req_data:
data = req_data['CADF_EVENT']
else:
continue
else:
# Every meter should be posted to the target
data = meter
res = requests.post(self.target,
data=json.dumps(data),
headers=self.headers,
timeout=self.timeout)
LOG.debug(_('Message posting finished with status code '
'%d.') % res.status_code)
except Exception as err:
LOG.exception(_('Failed to record metering data: %s'),
err)
else:
LOG.warning(_(
'message signature invalid, discarding message: %r'),
meter)
def record_events(self, events):
if not isinstance(events, list):
events = [events]
for event in events:
res = None
try:
                # Serialize the event to match the JSON Content-type header.
                res = requests.post(self.event_target,
                                    data=json.dumps(event),
                                    headers=self.headers,
                                    timeout=self.timeout)
res.raise_for_status()
except Exception:
                # NOTE: requests.Response is falsy for 4xx/5xx statuses, so
                # check against None explicitly to keep the real status code.
                error_code = res.status_code if res is not None else 'unknown'
                LOG.exception(_LE('Status Code: %(code)s. Failed to dispatch '
                                  'event: %(event)s'),
{'code': error_code, 'event': event})

View File

@ -1,40 +0,0 @@
#
# Copyright 2014 eNovance
#
# Authors: Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class ResourceBase(object):
"""Base class for resource."""
@abc.abstractmethod
def get_resource_extra_attributes(self, sample):
"""Extract the metadata from a ceilometer sample.
:param sample: The ceilometer sample
:returns: the resource attributes
"""
@abc.abstractmethod
def get_metrics_names(self):
"""Return the metric handled by this resource.
:returns: list of metric names
"""

View File

@ -1,31 +0,0 @@
#
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.dispatcher.resources import base
class CephAccount(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
return {}
@staticmethod
def get_metrics_names():
return ['radosgw.api.request',
'radosgw.objects.size',
'radosgw.objects',
'radosgw.objects.containers',
'radosgw.containers.objects',
'radosgw.containers.objects.size',
]

View File

@ -1,44 +0,0 @@
#
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.dispatcher.resources import base
class Identity(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
return {}
@staticmethod
def get_metrics_names():
return ['identity.authenticate.success',
'identity.authenticate.pending',
'identity.authenticate.failure',
'identity.user.created',
'identity.user.deleted',
'identity.user.updated',
'identity.group.created',
'identity.group.deleted',
'identity.group.updated',
'identity.role.created',
'identity.role.deleted',
'identity.role.updated',
'identity.project.created',
'identity.project.deleted',
'identity.project.updated',
'identity.trust.created',
'identity.trust.deleted',
'identity.role_assignment.created',
'identity.role_assignment.deleted',
]

View File

@ -1,30 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.dispatcher.resources import base
class Image(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
metadata = sample['resource_metadata']
params = {
"name": metadata['name'],
"container_format": metadata["container_format"],
"disk_format": metadata["disk_format"]
}
return params
@staticmethod
def get_metrics_names():
return ['image',
'image.size']

View File

@ -1,54 +0,0 @@
#
# Copyright 2014 eNovance
#
# Authors: Julien Danjou <julien@danjou.info>
# Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.dispatcher.resources import base
class Instance(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
metadata = sample['resource_metadata']
params = {
"host": metadata['host'],
"image_ref": metadata['image_ref_url'],
"display_name": metadata['display_name'],
}
if "instance_flavor_id" in metadata:
params["flavor_id"] = int(metadata['instance_flavor_id'])
else:
            # NOTE(sileht): instance.exists has the flavor here
params["flavor_id"] = int(metadata["flavor"]["id"])
server_group = metadata.get('user_metadata', {}).get('server_group')
if server_group:
params["server_group"] = server_group
return params
@staticmethod
def get_metrics_names():
        # NOTE(sileht): Can we generate the list by loading the ceilometer
        # plugins?
return ['instance',
'disk.root.size',
'disk.ephemeral.size',
'memory',
'memory.usage',
'vcpus',
'cpu',
'cpu_util']

View File

@ -1,30 +0,0 @@
#
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.dispatcher.resources import base
class IPMI(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
return {}
@staticmethod
def get_metrics_names():
return ['hardware.ipmi.node.power',
'hardware.ipmi.node.temperature',
'hardware.ipmi.node.fan',
'hardware.ipmi.node.current',
'hardware.ipmi.node.voltage',
]

View File

@ -1,41 +0,0 @@
#
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.dispatcher.resources import base
class Network(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
return {}
@staticmethod
def get_metrics_names():
return ['bandwidth',
'network',
'network.create',
'network.update',
'subnet',
'subnet.create',
'subnet.update',
'port',
'port.create',
'port.update',
'router',
'router.create',
'router.update',
'ip.floating',
'ip.floating.create',
'ip.floating.update',
]

View File

@ -1,30 +0,0 @@
#
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.dispatcher.resources import base
class Stack(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
return {}
@staticmethod
def get_metrics_names():
return ['stack.create',
'stack.update',
'stack.delete',
'stack.resume',
'stack.suspend',
]

View File

@ -1,33 +0,0 @@
#
# Copyright 2014 eNovance
#
# Authors: Julien Danjou <julien@danjou.info>
# Mehdi Abaakouk <mehdi.abaakouk@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.dispatcher.resources import base
class SwiftAccount(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
return {}
@staticmethod
def get_metrics_names():
return ['storage.objects.incoming.bytes',
'storage.objects.outgoing.bytes',
'storage.api.request',
'storage.objects.size',
'storage.objects',
'storage.objects.containers']

View File

@ -1,38 +0,0 @@
#
# Copyright 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.dispatcher.resources import base
class Volume(base.ResourceBase):
@staticmethod
def get_resource_extra_attributes(sample):
metadata = sample['resource_metadata']
params = {
"display_name": metadata['display_name'],
}
return params
@staticmethod
def get_metrics_names():
return ['volume',
'volume.size',
'volume.create',
'volume.delete',
'volume.update',
'volume.resize',
'volume.attach',
'volume.detach',
]

View File

@ -1,130 +0,0 @@
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from keystoneclient import exceptions
from oslo_config import cfg
from oslo_log import log
import requests
import six
from ceilometer.agent import plugin_base
from ceilometer.i18n import _
from ceilometer import sample
LOG = log.getLogger(__name__)
SERVICE_OPTS = [
cfg.StrOpt('kwapi',
default='energy',
help='Kwapi service type.'),
]
cfg.CONF.register_opts(SERVICE_OPTS, group='service_types')
class KwapiClient(object):
"""Kwapi API client."""
def __init__(self, url, token=None):
"""Initializes client."""
self.url = url
self.token = token
def iter_probes(self):
"""Returns a list of dicts describing all probes."""
probes_url = self.url + '/probes/'
headers = {}
if self.token is not None:
headers = {'X-Auth-Token': self.token}
timeout = cfg.CONF.http_timeout
request = requests.get(probes_url, headers=headers, timeout=timeout)
message = request.json()
probes = message['probes']
for key, value in six.iteritems(probes):
probe_dict = value
probe_dict['id'] = key
yield probe_dict
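# Illustrative usage sketch (hypothetical endpoint and token; not part of
# the original module):
#
#     client = KwapiClient('http://kwapi.example.com:5000', token=auth_token)
#     for probe in client.iter_probes():
#         print(probe['id'], probe.get('kwh'), probe.get('w'))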
class _Base(plugin_base.PollsterBase):
"""Base class for the Kwapi pollster, derived from PollsterBase."""
@property
def default_discovery(self):
return 'endpoint:%s' % cfg.CONF.service_types.kwapi
@staticmethod
def get_kwapi_client(ksclient, endpoint):
"""Returns a KwapiClient configured with the proper url and token."""
return KwapiClient(endpoint, ksclient.auth_token)
CACHE_KEY_PROBE = 'kwapi.probes'
def _iter_probes(self, ksclient, cache, endpoint):
"""Iterate over all probes."""
key = '%s-%s' % (endpoint, self.CACHE_KEY_PROBE)
if key not in cache:
cache[key] = self._get_probes(ksclient, endpoint)
return iter(cache[key])
def _get_probes(self, ksclient, endpoint):
try:
client = self.get_kwapi_client(ksclient, endpoint)
except exceptions.EndpointNotFound:
LOG.debug(_("Kwapi endpoint not found"))
return []
return list(client.iter_probes())
class EnergyPollster(_Base):
"""Measures energy consumption."""
def get_samples(self, manager, cache, resources):
"""Returns all samples."""
for endpoint in resources:
for probe in self._iter_probes(manager.keystone, cache, endpoint):
yield sample.Sample(
name='energy',
type=sample.TYPE_CUMULATIVE,
unit='kWh',
volume=probe['kwh'],
user_id=None,
project_id=None,
resource_id=probe['id'],
timestamp=datetime.datetime.fromtimestamp(
probe['timestamp']).isoformat(),
resource_metadata={}
)
class PowerPollster(_Base):
"""Measures power consumption."""
def get_samples(self, manager, cache, resources):
"""Returns all samples."""
for endpoint in resources:
for probe in self._iter_probes(manager.keystone, cache, endpoint):
yield sample.Sample(
name='power',
type=sample.TYPE_GAUGE,
unit='W',
volume=probe['w'],
user_id=None,
project_id=None,
resource_id=probe['id'],
timestamp=datetime.datetime.fromtimestamp(
probe['timestamp']).isoformat(),
resource_metadata={}
)

View File

@ -1,404 +0,0 @@
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import fnmatch
import os
import jsonpath_rw
from oslo_config import cfg
from oslo_log import log
from oslo_utils import timeutils
import six
import yaml
from ceilometer.event.storage import models
from ceilometer.i18n import _
OPTS = [
cfg.StrOpt('definitions_cfg_file',
default="event_definitions.yaml",
help="Configuration file for event definitions."
),
cfg.BoolOpt('drop_unmatched_notifications',
default=False,
help='Drop notifications if no event definition matches. '
'(Otherwise, we convert them with just the default traits)'),
cfg.MultiStrOpt('store_raw',
default=[],
help='Store the raw notification for select priority '
'levels (info and/or error). By default, raw details are '
'not captured.')
]
cfg.CONF.register_opts(OPTS, group='event')
LOG = log.getLogger(__name__)
class EventDefinitionException(Exception):
def __init__(self, message, definition_cfg):
        super(EventDefinitionException, self).__init__(message)
        # Store the message explicitly; BaseException.message does not
        # exist on Python 3, and __str__ below relies on it.
        self.message = message
        self.definition_cfg = definition_cfg
def __str__(self):
return '%s %s: %s' % (self.__class__.__name__,
self.definition_cfg, self.message)
class TraitDefinition(object):
def __init__(self, name, trait_cfg, plugin_manager):
self.cfg = trait_cfg
self.name = name
type_name = trait_cfg.get('type', 'text')
if 'plugin' in trait_cfg:
plugin_cfg = trait_cfg['plugin']
if isinstance(plugin_cfg, six.string_types):
plugin_name = plugin_cfg
plugin_params = {}
else:
try:
plugin_name = plugin_cfg['name']
except KeyError:
raise EventDefinitionException(
_('Plugin specified, but no plugin name supplied for '
'trait %s') % name, self.cfg)
plugin_params = plugin_cfg.get('parameters')
if plugin_params is None:
plugin_params = {}
try:
plugin_ext = plugin_manager[plugin_name]
except KeyError:
raise EventDefinitionException(
_('No plugin named %(plugin)s available for '
'trait %(trait)s') % dict(plugin=plugin_name,
trait=name), self.cfg)
plugin_class = plugin_ext.plugin
self.plugin = plugin_class(**plugin_params)
else:
self.plugin = None
if 'fields' not in trait_cfg:
raise EventDefinitionException(
_("Required field in trait definition not specified: "
"'%s'") % 'fields',
self.cfg)
fields = trait_cfg['fields']
if not isinstance(fields, six.string_types):
# NOTE(mdragon): if not a string, we assume a list.
if len(fields) == 1:
fields = fields[0]
else:
fields = '|'.join('(%s)' % path for path in fields)
try:
self.fields = jsonpath_rw.parse(fields)
except Exception as e:
raise EventDefinitionException(
_("Parse error in JSONPath specification "
"'%(jsonpath)s' for %(trait)s: %(err)s")
% dict(jsonpath=fields, trait=name, err=e), self.cfg)
self.trait_type = models.Trait.get_type_by_name(type_name)
if self.trait_type is None:
raise EventDefinitionException(
_("Invalid trait type '%(type)s' for trait %(trait)s")
% dict(type=type_name, trait=name), self.cfg)
def _get_path(self, match):
if match.context is not None:
for path_element in self._get_path(match.context):
yield path_element
yield str(match.path)
def to_trait(self, notification_body):
values = [match for match in self.fields.find(notification_body)
if match.value is not None]
if self.plugin is not None:
value_map = [('.'.join(self._get_path(match)), match.value) for
match in values]
value = self.plugin.trait_value(value_map)
else:
value = values[0].value if values else None
if value is None:
return None
# NOTE(mdragon): some openstack projects (mostly Nova) emit ''
# for null fields for things like dates.
if self.trait_type != models.Trait.TEXT_TYPE and value == '':
return None
value = models.Trait.convert_value(self.trait_type, value)
return models.Trait(self.name, self.trait_type, value)
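# Illustrative sketch (not part of the original module): a trait definition
# without a 'plugin' key never indexes the plugin manager, so an empty dict
# can stand in for it here.
#
#     td = TraitDefinition('host', {'type': 'text', 'fields': 'payload.host'},
#                          plugin_manager={})
#     td.to_trait({'payload': {'host': 'compute-1'}})
#     # -> Trait('host', TEXT_TYPE, 'compute-1')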
class EventDefinition(object):
DEFAULT_TRAITS = dict(
service=dict(type='text', fields='publisher_id'),
request_id=dict(type='text', fields='_context_request_id'),
tenant_id=dict(type='text', fields=['payload.tenant_id',
'_context_tenant']),
)
def __init__(self, definition_cfg, trait_plugin_mgr):
self._included_types = []
self._excluded_types = []
self.traits = dict()
self.cfg = definition_cfg
self.raw_levels = [level.lower() for level in cfg.CONF.event.store_raw]
try:
event_type = definition_cfg['event_type']
traits = definition_cfg['traits']
except KeyError as err:
raise EventDefinitionException(
_("Required field %s not specified") % err.args[0], self.cfg)
if isinstance(event_type, six.string_types):
event_type = [event_type]
for t in event_type:
if t.startswith('!'):
self._excluded_types.append(t[1:])
else:
self._included_types.append(t)
if self._excluded_types and not self._included_types:
self._included_types.append('*')
for trait_name in self.DEFAULT_TRAITS:
self.traits[trait_name] = TraitDefinition(
trait_name,
self.DEFAULT_TRAITS[trait_name],
trait_plugin_mgr)
for trait_name in traits:
self.traits[trait_name] = TraitDefinition(
trait_name,
traits[trait_name],
trait_plugin_mgr)
def included_type(self, event_type):
for t in self._included_types:
if fnmatch.fnmatch(event_type, t):
return True
return False
def excluded_type(self, event_type):
for t in self._excluded_types:
if fnmatch.fnmatch(event_type, t):
return True
return False
def match_type(self, event_type):
return (self.included_type(event_type)
and not self.excluded_type(event_type))
@property
def is_catchall(self):
return '*' in self._included_types and not self._excluded_types
@staticmethod
def _extract_when(body):
"""Extract the generated datetime from the notification."""
        # NOTE: I am keeping the logic the same as it was in the collector.
        # However, *ALL* notifications should have a 'timestamp' field; it's
# part of the notification envelope spec. If this was put here because
# some openstack project is generating notifications without a
# timestamp, then that needs to be filed as a bug with the offending
# project (mdragon)
when = body.get('timestamp', body.get('_context_timestamp'))
if when:
return timeutils.normalize_time(timeutils.parse_isotime(when))
return timeutils.utcnow()
def to_event(self, notification_body):
event_type = notification_body['event_type']
message_id = notification_body['message_id']
when = self._extract_when(notification_body)
traits = (self.traits[t].to_trait(notification_body)
for t in self.traits)
# Only accept non-None value traits ...
traits = [trait for trait in traits if trait is not None]
raw = (notification_body
if notification_body.get('priority') in self.raw_levels else {})
event = models.Event(message_id, event_type, when, traits, raw)
return event
class NotificationEventsConverter(object):
"""Notification Event Converter
The NotificationEventsConverter handles the conversion of Notifications
from openstack systems into Ceilometer Events.
The conversion is handled according to event definitions in a config file.
The config is a list of event definitions. Order is significant; a
notification will be processed according to the LAST definition that
matches its event_type. (We use the last matching definition because that
allows you to use YAML merge syntax in the definitions file.)
Each definition is a dictionary with the following keys (all are
required):
- event_type: this is a list of notification event_types this definition
will handle. These can be wildcarded with unix shell glob (not regex!)
wildcards.
An exclusion listing (starting with a '!') will exclude any types listed
from matching. If ONLY exclusions are listed, the definition will match
anything not matching the exclusions.
This item can also be a string, which will be taken as equivalent to a
one-item list.
Examples:
* ['compute.instance.exists'] will only match
compute.instance.exists notifications
* "compute.instance.exists" Same as above.
* ["image.create", "image.delete"] will match
image.create and image.delete, but not anything else.
* "compute.instance.*" will match
compute.instance.create.start but not image.upload
* ['*.start','*.end', '!scheduler.*'] will match
compute.instance.create.start, and image.delete.end,
but NOT compute.instance.exists or
scheduler.run_instance.start
* '!image.*' matches any notification except image
notifications.
* ['*', '!image.*'] same as above.
- traits: (dict) The keys are trait names, the values are the trait
definitions. Each trait definition is a dictionary with the following
keys:
- type (optional): The data type for this trait, as a string. Valid
options are 'text', 'int', 'float' and 'datetime'; defaults to 'text'
if not specified.
- fields: a path specification for the field(s) in the notification you
wish to extract. The paths can be specified with a dot syntax
(e.g. 'payload.host'); dictionary syntax (e.g. 'payload[host]') is
also supported.
In either case, if the key for the field you are looking for contains
special characters, like '.', it will need to be quoted (with double
or single quotes) like so::
"payload.image_meta.'org.openstack__1__architecture'"
The syntax used for the field specification is a variant of JSONPath,
and is fairly flexible.
(see: https://github.com/kennknowles/python-jsonpath-rw for more info)
Specifications can be written to match multiple possible fields; the
value for the trait will be derived from the matching fields that
exist and have non-null (i.e. not None) values in the
notification.
By default the value will be taken from the first such field. (Plugins
can alter that, if they wish.)
This configuration value is normally a string; for convenience, it can
be specified as a list of specifications, which will be OR'ed together
(a union query in jsonpath terms).
- plugin (optional): (dictionary) with the following keys:
- name: (string) name of a plugin to load
- parameters: (optional) Dictionary of keyword args to pass
to the plugin on initialization. See documentation on each plugin to
see what arguments it accepts.
For convenience, this value can also be specified as a string, which is
interpreted as the name of a plugin to be loaded with no parameters.
"""
def __init__(self, events_config, trait_plugin_mgr, add_catchall=True):
self.definitions = [
EventDefinition(event_def, trait_plugin_mgr)
for event_def in reversed(events_config)]
if add_catchall and not any(d.is_catchall for d in self.definitions):
event_def = dict(event_type='*', traits={})
self.definitions.append(EventDefinition(event_def,
trait_plugin_mgr))
def to_event(self, notification_body):
event_type = notification_body['event_type']
message_id = notification_body['message_id']
edef = None
for d in self.definitions:
if d.match_type(event_type):
edef = d
break
if edef is None:
msg = (_('Dropping Notification %(type)s (uuid:%(msgid)s)')
% dict(type=event_type, msgid=message_id))
if cfg.CONF.event.drop_unmatched_notifications:
LOG.debug(msg)
else:
# If drop_unmatched_notifications is False, this should
# never happen. (mdragon)
LOG.error(msg)
return None
return edef.to_event(notification_body)
def get_config_file():
config_file = cfg.CONF.event.definitions_cfg_file
if not os.path.exists(config_file):
config_file = cfg.CONF.find_file(config_file)
return config_file
def setup_events(trait_plugin_mgr):
"""Setup the event definitions from yaml config file."""
config_file = get_config_file()
if config_file is not None:
LOG.debug(_("Event Definitions configuration file: %s"), config_file)
with open(config_file) as cf:
config = cf.read()
try:
events_config = yaml.safe_load(config)
except yaml.YAMLError as err:
if hasattr(err, 'problem_mark'):
mark = err.problem_mark
errmsg = (_("Invalid YAML syntax in Event Definitions file "
"%(file)s at line: %(line)s, column: %(column)s.")
% dict(file=config_file,
line=mark.line + 1,
column=mark.column + 1))
else:
errmsg = (_("YAML error reading Event Definitions file "
"%(file)s")
% dict(file=config_file))
LOG.error(errmsg)
raise
else:
LOG.debug(_("No Event Definitions configuration file found!"
" Using default config."))
events_config = []
LOG.info(_("Event Definitions: %s"), events_config)
allow_drop = cfg.CONF.event.drop_unmatched_notifications
return NotificationEventsConverter(events_config,
trait_plugin_mgr,
add_catchall=not allow_drop)
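
For reference, a minimal sketch of driving this converter by hand, assuming a ceilometer tree from before this commit (importing the converter module registers the `event` options it reads); the definition, notification body, and all values are illustrative:

from stevedore import extension

from ceilometer.event import converter

# One definition, equivalent to what yaml.safe_load() would produce
# from the definitions file described above.
events_config = [{
    'event_type': 'compute.instance.*',
    'traits': {
        'instance_id': {'type': 'text', 'fields': 'payload.instance_id'},
    },
}]

plugin_mgr = extension.ExtensionManager(
    namespace='ceilometer.event.trait_plugin')
conv = converter.NotificationEventsConverter(events_config, plugin_mgr)

notification = {
    'message_id': 'msg-0001',
    'event_type': 'compute.instance.create.start',
    'timestamp': '2015-05-25T07:26:37Z',
    'priority': 'info',
    'publisher_id': 'compute.host1',
    'payload': {'instance_id': 'inst-0001'},
}
event = conv.to_event(notification)  # a models.Event, or None if dropped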

View File

@ -1,84 +0,0 @@
#
# Copyright 2012-2014 eNovance <licensing@enovance.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
from oslo_config import cfg
from oslo_context import context
import oslo_messaging
from stevedore import extension
from ceilometer.event import converter as event_converter
from ceilometer.i18n import _
from ceilometer import messaging
LOG = logging.getLogger(__name__)
class EventsNotificationEndpoint(object):
def __init__(self, manager):
super(EventsNotificationEndpoint, self).__init__()
LOG.debug(_('Loading event definitions'))
self.ctxt = context.get_admin_context()
self.event_converter = event_converter.setup_events(
extension.ExtensionManager(
namespace='ceilometer.event.trait_plugin'))
self.manager = manager
def info(self, ctxt, publisher_id, event_type, payload, metadata):
"""Convert message to Ceilometer Event.
:param ctxt: oslo_messaging context
:param publisher_id: publisher of the notification
:param event_type: type of notification
:param payload: notification payload
:param metadata: metadata about the notification
"""
# NOTE: the rpc layer currently rips out the notification
# delivery_info, which is critical to determining the
# source of the notification. This will have to get added back later.
notification = messaging.convert_to_old_notification_format(
'info', ctxt, publisher_id, event_type, payload, metadata)
self.process_notification(notification)
def error(self, ctxt, publisher_id, event_type, payload, metadata):
"""Convert error message to Ceilometer Event.
:param ctxt: oslo_messaging context
:param publisher_id: publisher of the notification
:param event_type: type of notification
:param payload: notification payload
:param metadata: metadata about the notification
"""
# NOTE: the rpc layer currently rips out the notification
# delivery_info, which is critical to determining the
# source of the notification. This will have to get added back later.
notification = messaging.convert_to_old_notification_format(
'error', ctxt, publisher_id, event_type, payload, metadata)
self.process_notification(notification)
def process_notification(self, notification):
try:
event = self.event_converter.to_event(notification)
if event is not None:
with self.manager.publisher(self.ctxt) as p:
p(event)
except Exception:
if not cfg.CONF.notification.ack_on_event_error:
return oslo_messaging.NotificationResult.REQUEUE
raise
return oslo_messaging.NotificationResult.HANDLED
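
A hedged sketch of how such an endpoint is typically wired into an oslo.messaging notification listener; the transport configuration and topic are illustrative, and StubManager is a hypothetical stand-in for the notification agent that provides the publisher context manager used above (EventsNotificationEndpoint is the class from the file above):

import contextlib

from oslo_config import cfg
import oslo_messaging


class StubManager(object):
    # Hypothetical stand-in for the real notification agent.
    @contextlib.contextmanager
    def publisher(self, ctxt):
        yield lambda event: None  # discard events in this sketch


transport = oslo_messaging.get_transport(cfg.CONF)
targets = [oslo_messaging.Target(topic='notifications')]
endpoints = [EventsNotificationEndpoint(StubManager())]
listener = oslo_messaging.get_notification_listener(
    transport, targets, endpoints)
listener.start()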

View File

@ -1,99 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ceilometer
class Connection(object):
"""Base class for event storage system connections."""
# A dictionary representing the capabilities of this driver.
CAPABILITIES = {
'events': {'query': {'simple': False}},
}
STORAGE_CAPABILITIES = {
'storage': {'production_ready': False},
}
def __init__(self, url):
pass
@staticmethod
def upgrade():
"""Migrate the database to `version` or the most recent version."""
@staticmethod
def clear():
"""Clear database."""
@staticmethod
def record_events(events):
"""Write the events to the backend storage system.
:param events: a list of model.Event objects.
"""
raise ceilometer.NotImplementedError('Events not implemented.')
@staticmethod
def get_events(event_filter):
"""Return an iterable of model.Event objects."""
raise ceilometer.NotImplementedError('Events not implemented.')
@staticmethod
def get_event_types():
"""Return all event types as an iterable of strings."""
raise ceilometer.NotImplementedError('Events not implemented.')
@staticmethod
def get_trait_types(event_type):
"""Return a dictionary containing the name and data type of the trait.
Only trait types for the provided event_type are
returned.
:param event_type: the type of the Event
"""
raise ceilometer.NotImplementedError('Events not implemented.')
@staticmethod
def get_traits(event_type, trait_type=None):
"""Return all trait instances associated with an event_type.
If trait_type is specified, only return instances of that trait type.
:param event_type: the type of the Event to filter by
:param trait_type: the name of the Trait to filter by
"""
raise ceilometer.NotImplementedError('Events not implemented.')
@classmethod
def get_capabilities(cls):
"""Return an dictionary with the capabilities of each driver."""
return cls.CAPABILITIES
@classmethod
def get_storage_capabilities(cls):
"""Return a dictionary representing the performance capabilities.
This is needed to evaluate the performance of each driver.
"""
return cls.STORAGE_CAPABILITIES
@staticmethod
def clear_expired_event_data(ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
raise ceilometer.NotImplementedError('Clearing events not implemented')
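
To illustrate the contract, a toy driver over this base class; purely a sketch (an in-memory list, not a real backend), assuming the pre-commit module layout:

from ceilometer.event.storage import base


class InMemoryConnection(base.Connection):
    """Toy event driver that keeps everything in a Python list."""

    def __init__(self, url):
        super(InMemoryConnection, self).__init__(url)
        self._events = []

    def record_events(self, events):
        # models.Event objects are stored as-is.
        self._events.extend(events)

    def get_events(self, event_filter):
        # A real driver must honour event_filter; this sketch ignores it.
        return iter(self._events)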

View File

@ -1,69 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""DB2 storage backend
"""
import pymongo
from ceilometer.event.storage import pymongo_base
from ceilometer import storage
from ceilometer.storage.mongo import utils as pymongo_utils
class Connection(pymongo_base.Connection):
"""The db2 event storage for Ceilometer."""
CONNECTION_POOL = pymongo_utils.ConnectionPool()
def __init__(self, url):
# Since we are using pymongo, even though we are connecting to DB2,
# we still have to replace the scheme (which distinguishes the db2
# driver from the mongodb driver) so that pymongo will not raise an
# exception on the scheme.
url = url.replace('db2:', 'mongodb:', 1)
self.conn = self.CONNECTION_POOL.connect(url)
# Require MongoDB 2.2 to use aggregate(). Since we use mongodb as the
# backend for tests, the following code makes sure that the tests
# won't try aggregate() on an older mongodb during the test run.
# For db2, the versionArray won't be part of the server_info, so no
# exception is raised when real db2 gets used as the backend.
server_info = self.conn.server_info()
if server_info.get('sysInfo'):
self._using_mongodb = True
else:
self._using_mongodb = False
if self._using_mongodb and server_info.get('versionArray') < [2, 2]:
raise storage.StorageBadVersion("Need at least MongoDB 2.2")
connection_options = pymongo.uri_parser.parse_uri(url)
self.db = getattr(self.conn, connection_options['database'])
if connection_options.get('username'):
self.db.authenticate(connection_options['username'],
connection_options['password'])
self.upgrade()
def upgrade(self):
# create collection if not present
if 'event' not in self.db.conn.collection_names():
self.db.conn.create_collection('event')
def clear(self):
# The drop_database command does nothing on a db2 database since it
# has not been implemented there. However, calling this method is
# important for removing all the empty dbs created during the test
# runs, since the test run is against mongodb on Jenkins.
self.conn.drop_database(self.db.name)
self.conn.close()
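
The scheme rewrite in isolation (plain Python, no ceilometer needed): pymongo only understands the mongodb:// scheme, so the db2:// prefix is swapped before connecting; the URL is illustrative:

url = 'db2://user:secret@host1:27017/ceilometer'
assert url.replace('db2:', 'mongodb:', 1) == \
    'mongodb://user:secret@host1:27017/ceilometer'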

View File

@ -1,278 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
import operator
import elasticsearch as es
from elasticsearch import helpers
from oslo_log import log
from oslo_utils import netutils
from oslo_utils import timeutils
import six
from ceilometer.event.storage import base
from ceilometer.event.storage import models
from ceilometer.i18n import _LE, _LI
from ceilometer import storage
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Put the event data into an ElasticSearch db.
Events in ElasticSearch are indexed by day and stored by event_type.
An example document::
{"_index":"events_2014-10-21",
"_type":"event_type0",
"_id":"dc90e464-65ab-4a5d-bf66-ecb956b5d779",
"_score":1.0,
"_source":{"timestamp": "2014-10-21T20:02:09.274797"
"traits": {"id4_0": "2014-10-21T20:02:09.274797",
"id3_0": 0.7510790937279408,
"id2_0": 5,
"id1_0": "18c97ba1-3b74-441a-b948-a702a30cbce2"}
}
}
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
index_name = 'events'
# NOTE(gordc): mainly for testing; data is not searchable immediately
# after write, only after periodic refreshes.
_refresh_on_write = False
def __init__(self, url):
url_split = netutils.urlsplit(url)
self.conn = es.Elasticsearch(url_split.netloc)
def upgrade(self):
iclient = es.client.IndicesClient(self.conn)
ts_template = {
'template': '*',
'mappings': {'_default_':
{'_timestamp': {'enabled': True,
'store': True},
'properties': {'traits': {'type': 'nested'}}}}}
iclient.put_template(name='enable_timestamp', body=ts_template)
def record_events(self, events):
def _build_bulk_index(event_list):
for ev in event_list:
traits = {t.name: t.value for t in ev.traits}
yield {'_op_type': 'create',
'_index': '%s_%s' % (self.index_name,
ev.generated.date().isoformat()),
'_type': ev.event_type,
'_id': ev.message_id,
'_source': {'timestamp': ev.generated.isoformat(),
'traits': traits,
'raw': ev.raw}}
error = None
for ok, result in helpers.streaming_bulk(
self.conn, _build_bulk_index(events)):
if not ok:
__, result = result.popitem()
if result['status'] == 409:
LOG.info(_LI('Duplicate event detected, skipping it: %s')
% result)
else:
LOG.exception(_LE('Failed to record event: %s') % result)
error = storage.StorageUnknownWriteError(result)
if self._refresh_on_write:
self.conn.indices.refresh(index='%s_*' % self.index_name)
while self.conn.cluster.pending_tasks(local=True)['tasks']:
pass
if error:
raise error
def _make_dsl_from_filter(self, indices, ev_filter):
q_args = {}
filters = []
if ev_filter.start_timestamp:
filters.append({'range': {'timestamp':
{'ge': ev_filter.start_timestamp.isoformat()}}})
while indices[0] < (
'%s_%s' % (self.index_name,
ev_filter.start_timestamp.date().isoformat())):
del indices[0]
if ev_filter.end_timestamp:
filters.append({'range': {'timestamp':
{'le': ev_filter.end_timestamp.isoformat()}}})
while indices[-1] > (
'%s_%s' % (self.index_name,
ev_filter.end_timestamp.date().isoformat())):
del indices[-1]
q_args['index'] = indices
if ev_filter.event_type:
q_args['doc_type'] = ev_filter.event_type
if ev_filter.message_id:
filters.append({'term': {'_id': ev_filter.message_id}})
if ev_filter.traits_filter:
trait_filters = []
for t_filter in ev_filter.traits_filter:
value = None
for val_type in ['integer', 'string', 'float', 'datetime']:
if t_filter.get(val_type):
value = t_filter.get(val_type)
if isinstance(value, six.string_types):
value = value.lower()
elif isinstance(value, datetime.datetime):
value = value.isoformat()
break
if t_filter.get('op') in ['gt', 'ge', 'lt', 'le']:
op = (t_filter.get('op').replace('ge', 'gte')
.replace('le', 'lte'))
trait_filters.append(
{'range': {t_filter['key']: {op: value}}})
else:
tf = {"query": {"query_string": {
"query": "%s: \"%s\"" % (t_filter['key'], value)}}}
if t_filter.get('op') == 'ne':
tf = {"not": tf}
trait_filters.append(tf)
filters.append(
{'nested': {'path': 'traits', 'query': {'filtered': {
'filter': {'bool': {'must': trait_filters}}}}}})
q_args['body'] = {'query': {'filtered':
{'filter': {'bool': {'must': filters}}}}}
return q_args
def get_events(self, event_filter):
iclient = es.client.IndicesClient(self.conn)
indices = iclient.get_mapping('%s_*' % self.index_name).keys()
if indices:
filter_args = self._make_dsl_from_filter(indices, event_filter)
results = self.conn.search(fields=['_id', 'timestamp',
'_type', '_source'],
sort='timestamp:asc',
**filter_args)
trait_mappings = {}
for record in results['hits']['hits']:
trait_list = []
if not record['_type'] in trait_mappings:
trait_mappings[record['_type']] = list(
self.get_trait_types(record['_type']))
for key in record['_source']['traits'].keys():
value = record['_source']['traits'][key]
for t_map in trait_mappings[record['_type']]:
if t_map['name'] == key:
dtype = t_map['data_type']
break
else:
dtype = models.Trait.TEXT_TYPE
trait_list.append(models.Trait(
name=key, dtype=dtype,
value=models.Trait.convert_value(dtype, value)))
gen_ts = timeutils.normalize_time(timeutils.parse_isotime(
record['_source']['timestamp']))
yield models.Event(message_id=record['_id'],
event_type=record['_type'],
generated=gen_ts,
traits=sorted(
trait_list,
key=operator.attrgetter('dtype')),
raw=record['_source']['raw'])
def get_event_types(self):
iclient = es.client.IndicesClient(self.conn)
es_mappings = iclient.get_mapping('%s_*' % self.index_name)
seen_types = set()
for index in es_mappings.keys():
for ev_type in es_mappings[index]['mappings'].keys():
seen_types.add(ev_type)
# TODO(gordc): tests assume sorted ordering but backends are not
# explicitly ordered.
# NOTE: _default_ is a type that appears in all mappings but is not a
# real 'type'.
seen_types.discard('_default_')
return sorted(list(seen_types))
@staticmethod
def _remap_es_types(d_type):
if d_type == 'string':
d_type = 'text'
elif d_type == 'long':
d_type = 'int'
elif d_type == 'double':
d_type = 'float'
elif d_type == 'date' or d_type == 'date_time':
d_type = 'datetime'
return d_type
def get_trait_types(self, event_type):
iclient = es.client.IndicesClient(self.conn)
es_mappings = iclient.get_mapping('%s_*' % self.index_name)
seen_types = []
for index in es_mappings.keys():
# if event_type exists in index and has traits
if (es_mappings[index]['mappings'].get(event_type) and
es_mappings[index]['mappings'][event_type]['properties']
['traits'].get('properties')):
for t_type in (es_mappings[index]['mappings'][event_type]
['properties']['traits']['properties'].keys()):
d_type = (es_mappings[index]['mappings'][event_type]
['properties']['traits']['properties']
[t_type]['type'])
d_type = models.Trait.get_type_by_name(
self._remap_es_types(d_type))
if (t_type, d_type) not in seen_types:
yield {'name': t_type, 'data_type': d_type}
seen_types.append((t_type, d_type))
def get_traits(self, event_type, trait_type=None):
t_types = dict((res['name'], res['data_type'])
for res in self.get_trait_types(event_type))
if not t_types or (trait_type and trait_type not in t_types.keys()):
return
result = self.conn.search('%s_*' % self.index_name, event_type)
for ev in result['hits']['hits']:
if trait_type and ev['_source']['traits'].get(trait_type):
yield models.Trait(
name=trait_type,
dtype=t_types[trait_type],
value=models.Trait.convert_value(
t_types[trait_type],
ev['_source']['traits'][trait_type]))
else:
for trait in ev['_source']['traits'].keys():
yield models.Trait(
name=trait,
dtype=t_types[trait],
value=models.Trait.convert_value(
t_types[trait],
ev['_source']['traits'][trait]))

View File

@ -1,221 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import operator
from oslo_log import log
from ceilometer.event.storage import base
from ceilometer.event.storage import models
from ceilometer.i18n import _, _LE
from ceilometer.storage.hbase import base as hbase_base
from ceilometer.storage.hbase import utils as hbase_utils
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(hbase_base.Connection, base.Connection):
"""Put the event data into a HBase database
Collections:
- events:
- row_key: timestamp of event's generation + uuid of event
in format: "%s:%s" % (ts, Event.message_id)
- Column Families:
f: contains the following qualifiers:
- event_type: description of event's type
- timestamp: time stamp of event generation
- all traits for this event in format:
.. code-block:: python
"%s:%s" % (trait_name, trait_type)
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
_memory_instance = None
EVENT_TABLE = "event"
def __init__(self, url):
super(Connection, self).__init__(url)
def upgrade(self):
tables = [self.EVENT_TABLE]
column_families = {'f': dict(max_versions=1)}
with self.conn_pool.connection() as conn:
hbase_utils.create_tables(conn, tables, column_families)
def clear(self):
LOG.debug(_('Dropping HBase schema...'))
with self.conn_pool.connection() as conn:
for table in [self.EVENT_TABLE]:
try:
conn.disable_table(table)
except Exception:
LOG.debug(_('Cannot disable table but ignoring error'))
try:
conn.delete_table(table)
except Exception:
LOG.debug(_('Cannot delete table but ignoring error'))
def record_events(self, event_models):
"""Write the events to Hbase.
:param event_models: a list of models.Event objects.
"""
error = None
with self.conn_pool.connection() as conn:
events_table = conn.table(self.EVENT_TABLE)
for event_model in event_models:
# The row key consists of the timestamp and the message_id from
# models.Event, so that stored events sort by timestamp in the
# database.
ts = event_model.generated
row = hbase_utils.prepare_key(
hbase_utils.timestamp(ts, reverse=False),
event_model.message_id)
event_type = event_model.event_type
traits = {}
if event_model.traits:
for trait in event_model.traits:
key = hbase_utils.prepare_key(trait.name, trait.dtype)
traits[key] = trait.value
record = hbase_utils.serialize_entry(traits,
event_type=event_type,
timestamp=ts,
raw=event_model.raw)
try:
events_table.put(row, record)
except Exception as ex:
LOG.exception(_LE("Failed to record event: %s") % ex)
error = ex
if error:
raise error
def get_events(self, event_filter):
"""Return an iter of models.Event objects.
:param event_filter: storage.EventFilter object, consists of filters
for events that are stored in database.
"""
q, start, stop = hbase_utils.make_events_query_from_filter(
event_filter)
with self.conn_pool.connection() as conn:
events_table = conn.table(self.EVENT_TABLE)
gen = events_table.scan(filter=q, row_start=start, row_stop=stop)
for event_id, data in gen:
traits = []
events_dict = hbase_utils.deserialize_entry(data)[0]
for key, value in events_dict.items():
if isinstance(key, tuple):
trait_name, trait_dtype = key
traits.append(models.Trait(name=trait_name,
dtype=int(trait_dtype),
value=value))
ts, mess = event_id.split(':')
yield models.Event(
message_id=hbase_utils.unquote(mess),
event_type=events_dict['event_type'],
generated=events_dict['timestamp'],
traits=sorted(traits,
key=operator.attrgetter('dtype')),
raw=events_dict['raw']
)
def get_event_types(self):
"""Return all event types as an iterable of strings."""
with self.conn_pool.connection() as conn:
events_table = conn.table(self.EVENT_TABLE)
gen = events_table.scan()
event_types = set()
for event_id, data in gen:
events_dict = hbase_utils.deserialize_entry(data)[0]
for key, value in events_dict.items():
if not isinstance(key, tuple) and key.startswith('event_type'):
if value not in event_types:
event_types.add(value)
yield value
def get_trait_types(self, event_type):
"""Return a dictionary containing the name and data type of the trait.
Only trait types for the provided event_type are returned.
:param event_type: the type of the Event
"""
q = hbase_utils.make_query(event_type=event_type)
trait_names = set()
with self.conn_pool.connection() as conn:
events_table = conn.table(self.EVENT_TABLE)
gen = events_table.scan(filter=q)
for event_id, data in gen:
events_dict = hbase_utils.deserialize_entry(data)[0]
for key, value in events_dict.items():
if isinstance(key, tuple):
trait_name, trait_type = key
if trait_name not in trait_names:
# Here we make sure that the method returns only unique
# trait types: e.g. if the same trait type is found in
# different events with an equal event_type, the method
# will return only one trait type. It is assumed that a
# given trait name can have only one trait type.
trait_names.add(trait_name)
data_type = models.Trait.type_names[int(trait_type)]
yield {'name': trait_name, 'data_type': data_type}
def get_traits(self, event_type, trait_type=None):
"""Return all trait instances associated with an event_type.
If trait_type is specified, only return instances of that trait type.
:param event_type: the type of the Event to filter by
:param trait_type: the name of the Trait to filter by
"""
q = hbase_utils.make_query(event_type=event_type,
trait_type=trait_type)
with self.conn_pool.connection() as conn:
events_table = conn.table(self.EVENT_TABLE)
gen = events_table.scan(filter=q)
for event_id, data in gen:
events_dict = hbase_utils.deserialize_entry(data)[0]
for key, value in events_dict.items():
if isinstance(key, tuple):
trait_name, trait_type = key
yield models.Trait(name=trait_name,
dtype=int(trait_type), value=value)
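
The row-key construction in isolation, reusing the two helpers exactly as the driver calls them (assumes the pre-commit ceilometer tree); the timestamp and message id are illustrative:

import datetime

from ceilometer.storage.hbase import utils as hbase_utils

ts = datetime.datetime(2015, 5, 25, 7, 26, 37)
# Timestamp first, so rows scan in generation order; reverse=False
# keeps ascending order.
row = hbase_utils.prepare_key(
    hbase_utils.timestamp(ts, reverse=False), 'msg-0001')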

View File

@ -1,33 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_log import log
from ceilometer.event.storage import base
from ceilometer.i18n import _LI
LOG = log.getLogger(__name__)
class Connection(base.Connection):
"""Log event data."""
@staticmethod
def clear_expired_event_data(ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
LOG.info(_LI("Dropping event data with TTL %d"), ttl)

View File

@ -1,78 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""MongoDB storage backend"""
from oslo_config import cfg
from oslo_log import log
import pymongo
from ceilometer.event.storage import pymongo_base
from ceilometer import storage
from ceilometer.storage import impl_mongodb
from ceilometer.storage.mongo import utils as pymongo_utils
LOG = log.getLogger(__name__)
class Connection(pymongo_base.Connection):
"""Put the event data into a MongoDB database."""
CONNECTION_POOL = pymongo_utils.ConnectionPool()
def __init__(self, url):
# NOTE(jd) Use our own connection pooling on top of the Pymongo one.
# We need that, as otherwise we would overflow the MongoDB instance
# with new connections, since we instantiate a Pymongo client each
# time someone requires a new storage connection.
self.conn = self.CONNECTION_POOL.connect(url)
# Require MongoDB 2.4 to use $setOnInsert
if self.conn.server_info()['versionArray'] < [2, 4]:
raise storage.StorageBadVersion("Need at least MongoDB 2.4")
connection_options = pymongo.uri_parser.parse_uri(url)
self.db = getattr(self.conn, connection_options['database'])
if connection_options.get('username'):
self.db.authenticate(connection_options['username'],
connection_options['password'])
# NOTE(jd) Upgrading is just about creating indexes, so let's do this
# on connection to be sure at least the TTL is correctly updated if
# needed.
self.upgrade()
def upgrade(self):
# create collection if not present
if 'event' not in self.db.conn.collection_names():
self.db.conn.create_collection('event')
# Establish indexes
ttl = cfg.CONF.database.event_time_to_live
impl_mongodb.Connection.update_ttl(ttl, 'event_ttl', 'timestamp',
self.db.event)
def clear(self):
self.conn.drop_database(self.db.name)
# Connection will be reopened automatically if needed
self.conn.close()
@staticmethod
def clear_expired_event_data(ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
LOG.debug("Clearing expired event data is based on native "
"MongoDB time to live feature and going in background.")

View File

@ -1,419 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""SQLAlchemy storage backend."""
from __future__ import absolute_import
import datetime
import os
from oslo_config import cfg
from oslo_db import exception as dbexc
from oslo_db.sqlalchemy import session as db_session
from oslo_log import log
from oslo_utils import timeutils
import sqlalchemy as sa
from ceilometer.event.storage import base
from ceilometer.event.storage import models as api_models
from ceilometer.i18n import _LE, _LI
from ceilometer.storage.sqlalchemy import models
from ceilometer import utils
LOG = log.getLogger(__name__)
AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
TRAIT_MAPLIST = [(api_models.Trait.NONE_TYPE, models.TraitText),
(api_models.Trait.TEXT_TYPE, models.TraitText),
(api_models.Trait.INT_TYPE, models.TraitInt),
(api_models.Trait.FLOAT_TYPE, models.TraitFloat),
(api_models.Trait.DATETIME_TYPE, models.TraitDatetime)]
TRAIT_ID_TO_MODEL = dict((x, y) for x, y in TRAIT_MAPLIST)
TRAIT_MODEL_TO_ID = dict((y, x) for x, y in TRAIT_MAPLIST)
trait_models_dict = {'string': models.TraitText,
'integer': models.TraitInt,
'datetime': models.TraitDatetime,
'float': models.TraitFloat}
def _build_trait_query(session, trait_type, key, value, op='eq'):
trait_model = trait_models_dict[trait_type]
op_dict = {'eq': (trait_model.value == value),
'lt': (trait_model.value < value),
'le': (trait_model.value <= value),
'gt': (trait_model.value > value),
'ge': (trait_model.value >= value),
'ne': (trait_model.value != value)}
conditions = [trait_model.key == key, op_dict[op]]
return (session.query(trait_model.event_id.label('ev_id'))
.filter(*conditions), trait_model)
class Connection(base.Connection):
"""Put the event data into a SQLAlchemy database.
Tables::
- EventType
- event definition
- { id: event type id
desc: description of event
}
- Event
- event data
- { id: event id
message_id: message id
generated = timestamp of event
event_type_id = event type -> eventtype.id
}
- TraitInt
- int trait value
- { event_id: event -> event.id
key: trait name
value: integer value
}
- TraitDatetime
- datetime trait value
- { event_id: event -> event.id
key: trait name
value: datetime value
}
- TraitText
- text trait value
- { event_id: event -> event.id
key: trait name
value: text value
}
- TraitFloat
- float trait value
- { event_id: event -> event.id
key: trait name
value: float value
}
"""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
def __init__(self, url):
# Set max_retries to 0: in certain failure cases oslo.db may retry
# making the db connection up to max_retries ^ 2 times, and db
# reconnection has already been implemented in the
# storage.__init__.get_connection_from_config function.
options = dict(cfg.CONF.database.items())
options['max_retries'] = 0
self._engine_facade = db_session.EngineFacade(url, **options)
def upgrade(self):
# NOTE(gordc): to minimise memory, only import migration when needed
from oslo_db.sqlalchemy import migration
path = os.path.join(os.path.abspath(os.path.dirname(__file__)),
'..', '..', 'storage', 'sqlalchemy',
'migrate_repo')
migration.db_sync(self._engine_facade.get_engine(), path)
def clear(self):
engine = self._engine_facade.get_engine()
for table in reversed(models.Base.metadata.sorted_tables):
engine.execute(table.delete())
engine.dispose()
def _get_or_create_event_type(self, event_type, session=None):
"""Check if an event type with the supplied name is already exists.
If not, we create it and return the record. This may result in a flush.
"""
if session is None:
session = self._engine_facade.get_session()
with session.begin(subtransactions=True):
et = session.query(models.EventType).filter(
models.EventType.desc == event_type).first()
if not et:
et = models.EventType(event_type)
session.add(et)
return et
def record_events(self, event_models):
"""Write the events to SQL database via sqlalchemy.
:param event_models: a list of model.Event objects.
"""
session = self._engine_facade.get_session()
error = None
for event_model in event_models:
event = None
try:
with session.begin():
event_type = self._get_or_create_event_type(
event_model.event_type, session=session)
event = models.Event(event_model.message_id, event_type,
event_model.generated,
event_model.raw)
session.add(event)
session.flush()
if event_model.traits:
trait_map = {}
for trait in event_model.traits:
if trait_map.get(trait.dtype) is None:
trait_map[trait.dtype] = []
trait_map[trait.dtype].append(
{'event_id': event.id,
'key': trait.name,
'value': trait.value})
for dtype in trait_map.keys():
model = TRAIT_ID_TO_MODEL[dtype]
session.execute(model.__table__.insert(),
trait_map[dtype])
except dbexc.DBDuplicateEntry as e:
LOG.info(_LI("Duplicate event detected, skipping it: %s") % e)
except KeyError as e:
LOG.exception(_LE('Failed to record event: %s') % e)
except Exception as e:
LOG.exception(_LE('Failed to record event: %s') % e)
error = e
if error:
raise error
def get_events(self, event_filter):
"""Return an iterable of model.Event objects.
:param event_filter: EventFilter instance
"""
session = self._engine_facade.get_session()
with session.begin():
event_query = session.query(models.Event)
# Build up the join conditions
event_join_conditions = [models.EventType.id ==
models.Event.event_type_id]
if event_filter.event_type:
event_join_conditions.append(models.EventType.desc ==
event_filter.event_type)
event_query = event_query.join(models.EventType,
sa.and_(*event_join_conditions))
# Build up the where conditions
event_filter_conditions = []
if event_filter.message_id:
event_filter_conditions.append(
models.Event.message_id == event_filter.message_id)
if event_filter.start_timestamp:
event_filter_conditions.append(
models.Event.generated >= event_filter.start_timestamp)
if event_filter.end_timestamp:
event_filter_conditions.append(
models.Event.generated <= event_filter.end_timestamp)
if event_filter_conditions:
event_query = (event_query.
filter(sa.and_(*event_filter_conditions)))
trait_subq = None
# Build trait filter
if event_filter.traits_filter:
filters = list(event_filter.traits_filter)
trait_filter = filters.pop()
key = trait_filter.pop('key')
op = trait_filter.pop('op', 'eq')
trait_type, value = list(trait_filter.items())[0]
trait_subq, t_model = _build_trait_query(session, trait_type,
key, value, op)
for trait_filter in filters:
key = trait_filter.pop('key')
op = trait_filter.pop('op', 'eq')
trait_type, value = list(trait_filter.items())[0]
q, model = _build_trait_query(session, trait_type,
key, value, op)
trait_subq = trait_subq.filter(
q.filter(model.event_id == t_model.event_id).exists())
trait_subq = trait_subq.subquery()
query = (session.query(models.Event.id)
.join(models.EventType,
sa.and_(*event_join_conditions)))
if trait_subq is not None:
query = query.join(trait_subq,
trait_subq.c.ev_id == models.Event.id)
if event_filter_conditions:
query = query.filter(sa.and_(*event_filter_conditions))
event_list = {}
# get a list of all events that match filters
for (id_, generated, message_id,
desc, raw) in query.add_columns(
models.Event.generated, models.Event.message_id,
models.EventType.desc, models.Event.raw).order_by(
models.Event.generated).all():
event_list[id_] = api_models.Event(message_id, desc,
generated, [], raw)
# Query all traits related to events.
# NOTE (gordc): cast is done because pgsql defaults to TEXT when
# handling unknown values such as null.
trait_q = (
query.join(
models.TraitDatetime,
models.TraitDatetime.event_id == models.Event.id)
.add_columns(
models.TraitDatetime.key, models.TraitDatetime.value,
sa.cast(sa.null(), sa.Integer),
sa.cast(sa.null(), sa.Float(53)),
sa.cast(sa.null(), sa.String(255)))
).union(
query.join(
models.TraitInt,
models.TraitInt.event_id == models.Event.id)
.add_columns(models.TraitInt.key, sa.null(),
models.TraitInt.value, sa.null(), sa.null()),
query.join(
models.TraitFloat,
models.TraitFloat.event_id == models.Event.id)
.add_columns(models.TraitFloat.key, sa.null(),
sa.null(), models.TraitFloat.value, sa.null()),
query.join(
models.TraitText,
models.TraitText.event_id == models.Event.id)
.add_columns(models.TraitText.key, sa.null(),
sa.null(), sa.null(), models.TraitText.value))
for id_, key, t_date, t_int, t_float, t_text in (
trait_q.order_by('2')).all():
if t_int is not None:
dtype = api_models.Trait.INT_TYPE
val = t_int
elif t_float is not None:
dtype = api_models.Trait.FLOAT_TYPE
val = t_float
elif t_date is not None:
dtype = api_models.Trait.DATETIME_TYPE
val = t_date
else:
dtype = api_models.Trait.TEXT_TYPE
val = t_text
trait_model = api_models.Trait(key, dtype, val)
event_list[id_].append_trait(trait_model)
return event_list.values()
def get_event_types(self):
"""Return all event types as an iterable of strings."""
session = self._engine_facade.get_session()
with session.begin():
query = (session.query(models.EventType.desc).
order_by(models.EventType.desc))
for name in query.all():
# The query returns a tuple with one element.
yield name[0]
def get_trait_types(self, event_type):
"""Return a dictionary containing the name and data type of the trait.
Only trait types for the provided event_type are returned.
:param event_type: the type of the Event
"""
session = self._engine_facade.get_session()
with session.begin():
for trait_model in [models.TraitText, models.TraitInt,
models.TraitFloat, models.TraitDatetime]:
query = (session.query(trait_model.key)
.join(models.Event,
models.Event.id == trait_model.event_id)
.join(models.EventType,
sa.and_(models.EventType.id ==
models.Event.event_type_id,
models.EventType.desc == event_type))
.distinct())
dtype = TRAIT_MODEL_TO_ID.get(trait_model)
for row in query.all():
yield {'name': row[0], 'data_type': dtype}
def get_traits(self, event_type, trait_type=None):
"""Return all trait instances associated with an event_type.
If trait_type is specified, only return instances of that trait type.
:param event_type: the type of the Event to filter by
:param trait_type: the name of the Trait to filter by
"""
session = self._engine_facade.get_session()
with session.begin():
for trait_model in [models.TraitText, models.TraitInt,
models.TraitFloat, models.TraitDatetime]:
query = (session.query(trait_model.key, trait_model.value)
.join(models.Event,
models.Event.id == trait_model.event_id)
.join(models.EventType,
sa.and_(models.EventType.id ==
models.Event.event_type_id,
models.EventType.desc == event_type))
.order_by(trait_model.key))
if trait_type:
query = query.filter(trait_model.key == trait_type)
dtype = TRAIT_MODEL_TO_ID.get(trait_model)
for k, v in query.all():
yield api_models.Trait(name=k,
dtype=dtype,
value=v)
def clear_expired_event_data(self, ttl):
"""Clear expired data from the backend storage system.
Clearing occurs according to the time-to-live.
:param ttl: Number of seconds to keep records for.
"""
session = self._engine_facade.get_session()
with session.begin():
end = timeutils.utcnow() - datetime.timedelta(seconds=ttl)
event_q = (session.query(models.Event.id)
.filter(models.Event.generated < end))
event_subq = event_q.subquery()
for trait_model in [models.TraitText, models.TraitInt,
models.TraitFloat, models.TraitDatetime]:
(session.query(trait_model)
.filter(trait_model.event_id.in_(event_subq))
.delete(synchronize_session="fetch"))
event_rows = event_q.delete()
# remove EventType and TraitType with no corresponding
# matching events and traits
(session.query(models.EventType)
.filter(~models.EventType.events.any())
.delete(synchronize_session="fetch"))
LOG.info(_LI("%d events are removed from database"), event_rows)

View File

@ -1,127 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Model classes for use in the events storage API.
"""
from oslo_utils import timeutils
import six
from ceilometer.storage import base
def serialize_dt(value):
"""Serializes parameter if it is datetime."""
return value.isoformat() if hasattr(value, 'isoformat') else value
class Event(base.Model):
"""A raw event from the source system. Events have Traits.
Metrics will be derived from one or more Events.
"""
DUPLICATE = 1
UNKNOWN_PROBLEM = 2
INCOMPATIBLE_TRAIT = 3
def __init__(self, message_id, event_type, generated, traits, raw):
"""Create a new event.
:param message_id: Unique ID for the message this event
stemmed from. This is different from
the Event ID, which comes from the
underlying storage system.
:param event_type: The type of the event.
:param generated: UTC time for when the event occurred.
:param traits: list of Traits on this Event.
:param raw: Unindexed raw notification details.
"""
base.Model.__init__(self, message_id=message_id, event_type=event_type,
generated=generated, traits=traits, raw=raw)
def append_trait(self, trait_model):
self.traits.append(trait_model)
def __repr__(self):
trait_list = []
if self.traits:
trait_list = [six.text_type(trait) for trait in self.traits]
return ("<Event: %s, %s, %s, %s>" %
(self.message_id, self.event_type, self.generated,
" ".join(trait_list)))
def serialize(self):
return {'message_id': self.message_id,
'event_type': self.event_type,
'generated': serialize_dt(self.generated),
'traits': [trait.serialize() for trait in self.traits],
'raw': self.raw}
class Trait(base.Model):
"""A Trait is a key/value pair of data on an Event.
The value is variant record of basic data types (int, date, float, etc).
"""
NONE_TYPE = 0
TEXT_TYPE = 1
INT_TYPE = 2
FLOAT_TYPE = 3
DATETIME_TYPE = 4
type_names = {
NONE_TYPE: "none",
TEXT_TYPE: "string",
INT_TYPE: "integer",
FLOAT_TYPE: "float",
DATETIME_TYPE: "datetime"
}
def __init__(self, name, dtype, value):
if not dtype:
dtype = Trait.NONE_TYPE
base.Model.__init__(self, name=name, dtype=dtype, value=value)
def __repr__(self):
return "<Trait: %s %d %s>" % (self.name, self.dtype, self.value)
def serialize(self):
return self.name, self.dtype, serialize_dt(self.value)
def get_type_name(self):
return self.get_name_by_type(self.dtype)
@classmethod
def get_type_by_name(cls, type_name):
return getattr(cls, '%s_TYPE' % type_name.upper(), None)
@classmethod
def get_type_names(cls):
return cls.type_names.values()
@classmethod
def get_name_by_type(cls, type_id):
return cls.type_names.get(type_id, "none")
@classmethod
def convert_value(cls, trait_type, value):
if trait_type is cls.INT_TYPE:
return int(value)
if trait_type is cls.FLOAT_TYPE:
return float(value)
if trait_type is cls.DATETIME_TYPE:
return timeutils.normalize_time(timeutils.parse_isotime(value))
# Cropping the text value to match the TraitText value size
if isinstance(value, six.binary_type):
return value.decode('utf-8')[:255]
return six.text_type(value)[:255]
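
A few worked conversions through Trait.convert_value, assuming the pre-commit module path; note that text values are coerced to unicode and cropped to 255 characters:

from ceilometer.event.storage import models

assert models.Trait.convert_value(models.Trait.INT_TYPE, '5') == 5
assert models.Trait.convert_value(models.Trait.FLOAT_TYPE, '0.5') == 0.5
# Non-string input is coerced to text for TEXT_TYPE traits.
assert models.Trait.convert_value(models.Trait.TEXT_TYPE, 42) == '42'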

View File

@ -1,140 +0,0 @@
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Common functions for MongoDB and DB2 backends
"""
from oslo_log import log
import pymongo
from ceilometer.event.storage import base
from ceilometer.event.storage import models
from ceilometer.i18n import _LE, _LI
from ceilometer.storage.mongo import utils as pymongo_utils
from ceilometer import utils
LOG = log.getLogger(__name__)
COMMON_AVAILABLE_CAPABILITIES = {
'events': {'query': {'simple': True}},
}
AVAILABLE_STORAGE_CAPABILITIES = {
'storage': {'production_ready': True},
}
class Connection(base.Connection):
"""Base event Connection class for MongoDB and DB2 drivers."""
CAPABILITIES = utils.update_nested(base.Connection.CAPABILITIES,
COMMON_AVAILABLE_CAPABILITIES)
STORAGE_CAPABILITIES = utils.update_nested(
base.Connection.STORAGE_CAPABILITIES,
AVAILABLE_STORAGE_CAPABILITIES,
)
def record_events(self, event_models):
"""Write the events to database.
:param event_models: a list of models.Event objects.
"""
error = None
for event_model in event_models:
traits = []
if event_model.traits:
for trait in event_model.traits:
traits.append({'trait_name': trait.name,
'trait_type': trait.dtype,
'trait_value': trait.value})
try:
self.db.event.insert(
{'_id': event_model.message_id,
'event_type': event_model.event_type,
'timestamp': event_model.generated,
'traits': traits, 'raw': event_model.raw})
except pymongo.errors.DuplicateKeyError as ex:
LOG.info(_LI("Duplicate event detected, skipping it: %s") % ex)
except Exception as ex:
LOG.exception(_LE("Failed to record event: %s") % ex)
error = ex
if error:
raise error
def get_events(self, event_filter):
"""Return an iter of models.Event objects.
:param event_filter: storage.EventFilter object, consists of filters
for events that are stored in database.
"""
q = pymongo_utils.make_events_query_from_filter(event_filter)
for event in self.db.event.find(q):
traits = []
for trait in event['traits']:
traits.append(models.Trait(name=trait['trait_name'],
dtype=int(trait['trait_type']),
value=trait['trait_value']))
yield models.Event(message_id=event['_id'],
event_type=event['event_type'],
generated=event['timestamp'],
traits=traits, raw=event.get('raw'))
def get_event_types(self):
"""Return all event types as an iter of strings."""
return self.db.event.distinct('event_type')
def get_trait_types(self, event_type):
"""Return a dictionary containing the name and data type of the trait.
Only trait types for the provided event_type are returned.
:param event_type: the type of the Event.
"""
trait_names = set()
events = self.db.event.find({'event_type': event_type})
for event in events:
for trait in event['traits']:
trait_name = trait['trait_name']
if trait_name not in trait_names:
# Here we make sure that the method returns only unique
# trait names; only one trait type is returned per name. It
# is assumed that a given trait name can have only one
# trait type.
trait_names.add(trait_name)
yield {'name': trait_name,
'data_type': trait['trait_type']}
def get_traits(self, event_type, trait_name=None):
"""Return all trait instances associated with an event_type.
If trait_type is specified, only return instances of that trait type.
:param event_type: the type of the Event to filter by
:param trait_name: the name of the Trait to filter by
"""
if not trait_name:
events = self.db.event.find({'event_type': event_type})
else:
# We select events that have both the event_type and the given
# trait_name, and retrieve them with only the matching traits.
events = self.db.event.find({'$and': [{'event_type': event_type},
{'traits.trait_name': trait_name}]},
{'traits': {'$elemMatch':
{'trait_name': trait_name}}
})
for event in events:
for trait in event['traits']:
yield models.Trait(name=trait['trait_name'],
dtype=trait['trait_type'],
value=trait['trait_value'])
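
The document shape record_events writes per event, reconstructed as plain data (values illustrative); the event's message_id doubles as the Mongo _id, which is what makes duplicates raise DuplicateKeyError:

import datetime

doc = {
    '_id': 'msg-0001',  # message_id doubles as the primary key
    'event_type': 'compute.instance.exists',
    'timestamp': datetime.datetime(2015, 5, 25, 7, 26, 37),
    'traits': [{'trait_name': 'instance_id',
                'trait_type': 1,  # models.Trait.TEXT_TYPE
                'trait_value': 'inst-0001'}],
    'raw': {},
}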

View File

@ -1,160 +0,0 @@
#
# Copyright 2013 Rackspace Hosting.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class TraitPluginBase(object):
"""Base class for plugins.
It converts notification fields to Trait values.
"""
def __init__(self, **kw):
"""Setup the trait plugin.
For each Trait definition a plugin is used on in a conversion
definition, a new instance of the plugin will be created, and
initialized with the parameters (if any) specified in the
config file.
:param kw: the parameters specified in the event definitions file.
"""
super(TraitPluginBase, self).__init__()
@abc.abstractmethod
def trait_value(self, match_list):
"""Convert a set of fields to a Trait value.
This method is called each time an attempt is made to extract a trait
from a notification. It will be called *even if* no matching fields
are found in the notification (in that case, the match_list will be
empty). If this method returns None, the trait *will not* be added to
the event. Any other value returned by this method will be used as
the value for the trait. Values returned will be coerced to the
appropriate type for the trait.
:param match_list: A list (may be empty if no matches) of *tuples*.
Each tuple is (field_path, value) where field_path is the jsonpath
for that specific field.
Example::
trait's fields definition: ['payload.foobar',
'payload.baz',
'payload.thing.*']
notification body:
{
'message_id': '12345',
'publisher': 'someservice.host',
'payload': {
'foobar': 'test',
'thing': {
'bar': 12,
'boing': 13,
}
}
}
match_list will be: [('payload.foobar','test'),
('payload.thing.bar',12),
('payload.thing.boing',13)]
Here is a plugin that emulates the default (no plugin) behavior:
.. code-block:: python
class DefaultPlugin(TraitPluginBase):
"Plugin that returns the first field value."
def __init__(self, **kw):
super(DefaultPlugin, self).__init__()
def trait_value(self, match_list):
if not match_list:
return None
return match_list[0][1]
"""
class SplitterTraitPlugin(TraitPluginBase):
"""Plugin that splits a piece off of a string value."""
def __init__(self, separator=".", segment=0, max_split=None, **kw):
"""Setup how do split the field.
:param separator: String to split on. default "."
:param segment: Which segment to return. (int) default 0
:param max_split: Limit number of splits. Default: None (no limit)
"""
self.separator = separator
self.segment = segment
self.max_split = max_split
super(SplitterTraitPlugin, self).__init__(**kw)
def trait_value(self, match_list):
if not match_list:
return None
value = six.text_type(match_list[0][1])
if self.max_split is not None:
values = value.split(self.separator, self.max_split)
else:
values = value.split(self.separator)
try:
return values[self.segment]
except IndexError:
return None
class BitfieldTraitPlugin(TraitPluginBase):
"""Plugin to set flags on a bitfield."""
def __init__(self, initial_bitfield=0, flags=None, **kw):
"""Setup bitfield trait.
:param initial_bitfield: (int) initial value for the bitfield
Flags that are set will be OR'ed with this.
:param flags: List of dictionaries defining bitflags to set depending
on data in the notification. Each one has the following
keys:
path: jsonpath of field to match.
bit: (int) number of bit to set (lsb is bit 0)
value: set bit if the corresponding field's value
matches this. If value is not provided, the
bit will be set if the field exists (and
is non-null), regardless of its value.
"""
self.initial_bitfield = initial_bitfield
if flags is None:
flags = []
self.flags = flags
super(BitfieldTraitPlugin, self).__init__(**kw)
def trait_value(self, match_list):
matches = dict(match_list)
bitfield = self.initial_bitfield
for flagdef in self.flags:
path = flagdef['path']
bit = 2 ** int(flagdef['bit'])
if path in matches:
if 'value' in flagdef:
if matches[path] == flagdef['value']:
bitfield |= bit
else:
bitfield |= bit
return bitfield
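
# A hedged worked example of the bitfield logic above; the flags and the
# match_list are invented for illustration:
#
#     plugin = BitfieldTraitPlugin(flags=[
#         {'path': 'payload.state', 'bit': 0, 'value': 'active'},
#         {'path': 'payload.fixed_ip', 'bit': 2},
#     ])
#     plugin.trait_value([('payload.state', 'active'),
#                         ('payload.fixed_ip', '10.0.0.2')])
#     # returns 0b101 == 5: bit 0 is set because 'payload.state' matched
#     # its 'value', bit 2 because 'payload.fixed_ip' is merely present.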


@ -32,7 +32,7 @@ import re
# TODO(zqfan): When other oslo libraries switch over non-namespace'd
# imports, we need to add them to the regexp below.
 oslo_namespace_imports = re.compile(
-    r"(from|import) oslo[.](concurrency|config|utils|i18n|serialization)")
+    r"(from|import) oslo[.](config|utils|i18n|serialization)")
def check_oslo_namespace_imports(logical_line, physical_line, filename):


@ -1,77 +0,0 @@
# -*- encoding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo_config import cfg
from oslo_log import log
from ceilometer.agent import plugin_base
from ceilometer.i18n import _
from ceilometer import nova_client
LOG = log.getLogger(__name__)
OPTS = [
cfg.StrOpt('url_scheme',
default='snmp://',
help='URL scheme to use for hardware nodes.'),
cfg.StrOpt('readonly_user_name',
default='ro_snmp_user',
help='SNMPd user name of all nodes running in the cloud.'),
cfg.StrOpt('readonly_user_password',
default='password',
help='SNMPd password of all the nodes running in the cloud.',
secret=True),
]
cfg.CONF.register_opts(OPTS, group='hardware')
class NodesDiscoveryTripleO(plugin_base.DiscoveryBase):
def __init__(self):
super(NodesDiscoveryTripleO, self).__init__()
self.nova_cli = nova_client.Client()
@staticmethod
def _address(instance, field):
return instance.addresses['ctlplane'][0].get(field)
def discover(self, manager, param=None):
"""Discover resources to monitor."""
instances = self.nova_cli.instance_get_all()
resources = []
for instance in instances:
try:
ip_address = self._address(instance, 'addr')
final_address = (
cfg.CONF.hardware.url_scheme +
cfg.CONF.hardware.readonly_user_name + ':' +
cfg.CONF.hardware.readonly_user_password + '@' +
ip_address)
resource = {
'resource_id': instance.id,
'resource_url': final_address,
'mac_addr': self._address(instance,
'OS-EXT-IPS-MAC:mac_addr'),
'image_id': instance.image['id'],
'flavor_id': instance.flavor['id']
}
resources.append(resource)
except KeyError:
LOG.error(_("Couldn't obtain IP address of "
"instance %s") % instance.id)
return resources
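
# For illustration only: with the default options registered above, a node
# whose 'ctlplane' address is 192.0.2.10 would be reported roughly as (all
# concrete values here are invented):
#
#     {
#         'resource_id': '<instance uuid>',
#         'resource_url': 'snmp://ro_snmp_user:password@192.0.2.10',
#         'mac_addr': 'fa:16:3e:00:00:01',
#         'image_id': '<image uuid>',
#         'flavor_id': '<flavor uuid>',
#     }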


@ -1,26 +0,0 @@
#
# Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from stevedore import driver
def get_inspector(parsed_url, namespace='ceilometer.hardware.inspectors'):
"""Get inspector driver and load it.
:param parsed_url: urlparse.SplitResult object for the inspector
:param namespace: Namespace to use to look for drivers.
"""
loaded_driver = driver.DriverManager(namespace, parsed_url.scheme)
return loaded_driver.driver()
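
# A minimal usage sketch (the URL is hypothetical): the scheme of the
# parsed URL selects the stevedore driver registered under the namespace,
# e.g. 'snmp' loads the SNMP inspector:
#
#     from six.moves.urllib import parse as urlparse
#     inspector = get_inspector(urlparse.urlsplit('snmp://192.0.2.10:161'))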


@ -1,39 +0,0 @@
#
# Copyright 2014 ZHAW SoE
#
# Authors: Lucas Graf <graflu0@students.zhaw.ch>
# Toni Zehnder <zehndton@students.zhaw.ch>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inspector abstraction for read-only access to hardware components"""
import abc
import six
@six.add_metaclass(abc.ABCMeta)
class Inspector(object):
@abc.abstractmethod
def inspect_generic(self, host, identifier, cache, extra_metadata=None):
"""A generic inspect function.
:param host: the target host
:param identifier: the identifier of the metric
:param cache: cache passed from the pollster
:param extra_metadata: extra dict to be used as metadata
:return: an iterator of (value, metadata, extra)
:return value: the sample value
:return metadata: dict to construct sample's metadata
:return extra: dict of extra metadata to help constructing sample
"""


@ -1,442 +0,0 @@
#
# Copyright 2014 ZHAW SoE
# Copyright 2014 Intel Corp
#
# Authors: Lucas Graf <graflu0@students.zhaw.ch>
# Toni Zehnder <zehndton@students.zhaw.ch>
# Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Inspector for collecting data over SNMP"""
from pysnmp.entity.rfc3413.oneliner import cmdgen
import six
from ceilometer.hardware.inspector import base
class SNMPException(Exception):
pass
def parse_snmp_return(ret, is_bulk=False):
"""Check the return value of snmp operations
:param ret: a tuple of (errorIndication, errorStatus, errorIndex, data)
returned by pysnmp
:param is_bulk: True if the ret value is from GetBulkRequest
:return: a tuple of (err, data)
err: True if error found, or False if no error found
data: a string of error description if error found, or the
actual return data of the snmp operation
"""
err = True
(errIndication, errStatus, errIdx, varBinds) = ret
if errIndication:
data = errIndication
elif errStatus:
if is_bulk:
varBinds = varBinds[-1]
data = "%s at %s" % (errStatus.prettyPrint(),
errIdx and varBinds[int(errIdx) - 1] or "?")
else:
err = False
data = varBinds
return err, data
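
# A hedged usage sketch with pysnmp's oneliner API (host and community are
# placeholders):
#
#     ret = cmdgen.CommandGenerator().getCmd(
#         cmdgen.CommunityData('public'),
#         cmdgen.UdpTransportTarget(('192.0.2.10', 161)),
#         '1.3.6.1.4.1.2021.4.5.0')
#     err, data = parse_snmp_return(ret)
#     # on success err is False and data holds the returned varBinds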
EXACT = 'type_exact'
PREFIX = 'type_prefix'
class SNMPInspector(base.Inspector):
# CPU OIDs
_cpu_1_min_load_oid = "1.3.6.1.4.1.2021.10.1.3.1"
_cpu_5_min_load_oid = "1.3.6.1.4.1.2021.10.1.3.2"
_cpu_15_min_load_oid = "1.3.6.1.4.1.2021.10.1.3.3"
# Memory OIDs
_memory_total_oid = "1.3.6.1.4.1.2021.4.5.0"
_memory_avail_real_oid = "1.3.6.1.4.1.2021.4.6.0"
_memory_total_swap_oid = "1.3.6.1.4.1.2021.4.3.0"
_memory_avail_swap_oid = "1.3.6.1.4.1.2021.4.4.0"
_memory_buffer_oid = "1.3.6.1.4.1.2021.4.14.0"
_memory_cached_oid = "1.3.6.1.4.1.2021.4.15.0"
# Disk OIDs
_disk_index_oid = "1.3.6.1.4.1.2021.9.1.1"
_disk_path_oid = "1.3.6.1.4.1.2021.9.1.2"
_disk_device_oid = "1.3.6.1.4.1.2021.9.1.3"
_disk_size_oid = "1.3.6.1.4.1.2021.9.1.6"
_disk_used_oid = "1.3.6.1.4.1.2021.9.1.8"
# Network Interface OIDs
_interface_index_oid = "1.3.6.1.2.1.2.2.1.1"
_interface_name_oid = "1.3.6.1.2.1.2.2.1.2"
_interface_speed_oid = "1.3.6.1.2.1.2.2.1.5"
_interface_mac_oid = "1.3.6.1.2.1.2.2.1.6"
_interface_ip_oid = "1.3.6.1.2.1.4.20.1.2"
_interface_received_oid = "1.3.6.1.2.1.2.2.1.10"
_interface_transmitted_oid = "1.3.6.1.2.1.2.2.1.16"
_interface_error_oid = "1.3.6.1.2.1.2.2.1.20"
# System stats
_system_stats_cpu_idle_oid = "1.3.6.1.4.1.2021.11.11.0"
_system_stats_io_raw_sent_oid = "1.3.6.1.4.1.2021.11.57.0"
_system_stats_io_raw_received_oid = "1.3.6.1.4.1.2021.11.58.0"
# Network stats
_network_ip_out_requests_oid = "1.3.6.1.2.1.4.10.0"
_network_ip_in_receives_oid = "1.3.6.1.2.1.4.3.0"
# Default port
_port = 161
_disk_metadata = {
'path': (_disk_path_oid, str),
'device': (_disk_device_oid, str),
}
_net_metadata = {
'name': (_interface_name_oid, str),
'speed': (_interface_speed_oid, lambda x: int(x) / 8),
'mac': (_interface_mac_oid,
lambda x: x.prettyPrint().replace('0x', '')),
}
_CACHE_KEY_OID = "snmp_cached_oid"
    '''
    The following mapping defines how to construct the
    (value, metadata, extra) tuples returned by inspect_generic.

    MAPPING = {
        'identifier': {
            'matching_type': EXACT or PREFIX,
            'metric_oid': (oid, value_converter),
            'metadata': {
                metadata_name1: (oid1, value_converter),
                metadata_name2: (oid2, value_converter),
            },
            'post_op': special func to modify the return data,
        },
    }

    For a matching_type of EXACT, each item in the above mapping returns
    exactly one (value, metadata, extra) tuple. The value is obtained from
    an SNMP GetRequest for the oid of 'metric_oid', and the metadata dict
    is constructed from the results of SNMP GetRequests for the oids of
    'metadata'.

    For a matching_type of PREFIX, an SNMP GetBulkRequest is sent to get
    values for the oids of 'metric_oid' and 'metadata' of each item in the
    above mapping, and each item may return multiple
    (value, metadata, extra) tuples. For example, suppose we have the
    following mapping:

    MAPPING = {
        'disk.size.total': {
            'matching_type': PREFIX,
            'metric_oid': ("1.3.6.1.4.1.2021.9.1.6", int),
            'metadata': {
                'device': ("1.3.6.1.4.1.2021.9.1.3", str),
                'path': ("1.3.6.1.4.1.2021.9.1.2", str),
            },
            'post_op': None,
        },
    }

    and the SNMP agent has the following oid/value(s):

    {
        '1.3.6.1.4.1.2021.9.1.6.1': 19222656,
        '1.3.6.1.4.1.2021.9.1.3.1': "/dev/sda2",
        '1.3.6.1.4.1.2021.9.1.2.1': "/",
        '1.3.6.1.4.1.2021.9.1.6.2': 808112,
        '1.3.6.1.4.1.2021.9.1.3.2': "tmpfs",
        '1.3.6.1.4.1.2021.9.1.2.2': "/run",
    }

    Then two instances of (value, metadata, extra) are returned:

    (19222656, {'device': "/dev/sda2", 'path': "/"}, None)
    (808112, {'device': "tmpfs", 'path': "/run"}, None)

    The post_op is assumed to be implemented by the developer of a new
    metric. It can be used to add additional special metadata (e.g. an IP
    address), or to add information to the extra dict that is returned to
    tell the pollster how to build the final sample, e.g.
    extra.update({'project_id': xy, 'user_id': zw})
    '''
MAPPING = {
'cpu.load.1min': {
'matching_type': EXACT,
'metric_oid': (_cpu_1_min_load_oid, lambda x: float(str(x))),
'metadata': {},
'post_op': None
},
'cpu.load.5min': {
'matching_type': EXACT,
'metric_oid': (_cpu_5_min_load_oid, lambda x: float(str(x))),
'metadata': {},
'post_op': None,
},
'cpu.load.15min': {
'matching_type': EXACT,
'metric_oid': (_cpu_15_min_load_oid, lambda x: float(str(x))),
'metadata': {},
'post_op': None,
},
'memory.total': {
'matching_type': EXACT,
'metric_oid': (_memory_total_oid, int),
'metadata': {},
'post_op': None,
},
'memory.used': {
'matching_type': EXACT,
'metric_oid': (_memory_avail_real_oid, int),
'metadata': {},
'post_op': "_post_op_memory_avail_to_used",
},
'memory.swap.total': {
'matching_type': EXACT,
'metric_oid': (_memory_total_swap_oid, int),
'metadata': {},
'post_op': None,
},
'memory.swap.avail': {
'matching_type': EXACT,
'metric_oid': (_memory_avail_swap_oid, int),
'metadata': {},
'post_op': None,
},
'memory.buffer': {
'matching_type': EXACT,
'metric_oid': (_memory_buffer_oid, int),
'metadata': {},
'post_op': None,
},
'memory.cached': {
'matching_type': EXACT,
'metric_oid': (_memory_cached_oid, int),
'metadata': {},
'post_op': None,
},
'disk.size.total': {
'matching_type': PREFIX,
'metric_oid': (_disk_size_oid, int),
'metadata': _disk_metadata,
'post_op': None,
},
'disk.size.used': {
'matching_type': PREFIX,
'metric_oid': (_disk_used_oid, int),
'metadata': _disk_metadata,
'post_op': None,
},
'network.incoming.bytes': {
'matching_type': PREFIX,
'metric_oid': (_interface_received_oid, int),
'metadata': _net_metadata,
'post_op': "_post_op_net",
},
'network.outgoing.bytes': {
'matching_type': PREFIX,
'metric_oid': (_interface_transmitted_oid, int),
'metadata': _net_metadata,
'post_op': "_post_op_net",
},
'network.outgoing.errors': {
'matching_type': PREFIX,
'metric_oid': (_interface_error_oid, int),
'metadata': _net_metadata,
'post_op': "_post_op_net",
},
'network.ip.outgoing.datagrams': {
'matching_type': EXACT,
'metric_oid': (_network_ip_out_requests_oid, int),
'metadata': {},
'post_op': None,
},
'network.ip.incoming.datagrams': {
'matching_type': EXACT,
'metric_oid': (_network_ip_in_receives_oid, int),
'metadata': {},
'post_op': None,
},
'system_stats.cpu.idle': {
'matching_type': EXACT,
'metric_oid': (_system_stats_cpu_idle_oid, int),
'metadata': {},
'post_op': None,
},
'system_stats.io.outgoing.blocks': {
'matching_type': EXACT,
'metric_oid': (_system_stats_io_raw_sent_oid, int),
'metadata': {},
'post_op': None,
},
'system_stats.io.incoming.blocks': {
'matching_type': EXACT,
'metric_oid': (_system_stats_io_raw_received_oid, int),
'metadata': {},
'post_op': None,
},
}
def __init__(self):
super(SNMPInspector, self).__init__()
self._cmdGen = cmdgen.CommandGenerator()
def _query_oids(self, host, oids, cache, is_bulk):
# send GetRequest or GetBulkRequest to get oid values and
# populate the values into cache
authData = self._get_auth_strategy(host)
transport = cmdgen.UdpTransportTarget((host.hostname,
host.port or self._port))
oid_cache = cache.setdefault(self._CACHE_KEY_OID, {})
if is_bulk:
ret = self._cmdGen.bulkCmd(authData,
transport,
0, 100,
*oids,
lookupValues=True)
else:
ret = self._cmdGen.getCmd(authData,
transport,
*oids,
lookupValues=True)
(error, data) = parse_snmp_return(ret, is_bulk)
if error:
raise SNMPException("An error occurred, oids %(oid)s, "
"host %(host)s, %(err)s" %
dict(oid=oids,
host=host.hostname,
err=data))
# save result into cache
if is_bulk:
for var_bind_table_row in data:
for name, val in var_bind_table_row:
oid_cache[name.prettyPrint()] = val
else:
for name, val in data:
oid_cache[name.prettyPrint()] = val
@staticmethod
def find_matching_oids(oid_cache, oid, match_type, find_one=True):
matched = []
if match_type == PREFIX:
for key in oid_cache.keys():
if key.startswith(oid):
matched.append(key)
if find_one:
break
else:
if oid in oid_cache:
matched.append(oid)
return matched
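
    # For example (cache contents invented): with an oid_cache of
    #     {'1.3.6.1.4.1.2021.9.1.6.1': 19222656,
    #      '1.3.6.1.4.1.2021.9.1.6.2': 808112}
    # find_matching_oids(oid_cache, '1.3.6.1.4.1.2021.9.1.6', PREFIX, False)
    # returns both keys, while the default find_one=True stops after the
    # first match; EXACT matching only ever checks the oid itself.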
@staticmethod
def get_oid_value(oid_cache, oid_def, suffix=''):
oid, converter = oid_def
value = oid_cache[oid + suffix]
if converter:
value = converter(value)
return value
@classmethod
def construct_metadata(cls, oid_cache, meta_defs, suffix=''):
metadata = {}
for key, oid_def in six.iteritems(meta_defs):
metadata[key] = cls.get_oid_value(oid_cache, oid_def, suffix)
return metadata
@classmethod
def _find_missing_oids(cls, meter_def, cache):
        # find oids that have not yet been queried and cached
new_oids = []
oid_cache = cache.setdefault(cls._CACHE_KEY_OID, {})
# check metric_oid
if not cls.find_matching_oids(oid_cache,
meter_def['metric_oid'][0],
meter_def['matching_type']):
new_oids.append(meter_def['metric_oid'][0])
for metadata in meter_def['metadata'].values():
if not cls.find_matching_oids(oid_cache,
metadata[0],
meter_def['matching_type']):
new_oids.append(metadata[0])
return new_oids
def inspect_generic(self, host, identifier, cache, extra_metadata=None):
# the snmp definition for the corresponding meter
meter_def = self.MAPPING[identifier]
        # collect the oids that need to be queried
oids_to_query = self._find_missing_oids(meter_def, cache)
# query oids and populate into caches
if oids_to_query:
self._query_oids(host, oids_to_query, cache,
meter_def['matching_type'] == PREFIX)
# construct (value, metadata, extra)
oid_cache = cache[self._CACHE_KEY_OID]
        # find all oids needed to construct the final sample values:
        # for a matching type of EXACT, only one sample is generated;
        # for a matching type of PREFIX, multiple samples may be generated
oids_for_sample_values = self.find_matching_oids(
oid_cache,
meter_def['metric_oid'][0],
meter_def['matching_type'],
False)
extra_metadata = extra_metadata or {}
for oid in oids_for_sample_values:
suffix = oid[len(meter_def['metric_oid'][0]):]
value = self.get_oid_value(oid_cache,
meter_def['metric_oid'],
suffix)
# get the metadata for this sample value
metadata = self.construct_metadata(oid_cache,
meter_def['metadata'],
suffix)
# call post_op for special cases
if meter_def['post_op']:
func = getattr(self, meter_def['post_op'], None)
if func:
value = func(host, cache, meter_def,
value, metadata, extra_metadata,
suffix)
yield (value, metadata, extra_metadata)
def _post_op_memory_avail_to_used(self, host, cache, meter_def,
value, metadata, extra, suffix):
if self._memory_total_oid not in cache[self._CACHE_KEY_OID]:
self._query_oids(host, [self._memory_total_oid], cache, False)
value = int(cache[self._CACHE_KEY_OID][self._memory_total_oid]) - value
return value
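
    # Illustrative numbers (invented): if memTotalReal is 2048000 kB and
    # memAvailReal is 512000 kB, the post-op above reports memory.used as
    # 2048000 - 512000 = 1536000.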
def _post_op_net(self, host, cache, meter_def,
value, metadata, extra, suffix):
# add ip address into metadata
oid_cache = cache.setdefault(self._CACHE_KEY_OID, {})
if not self.find_matching_oids(oid_cache,
self._interface_ip_oid,
PREFIX):
# populate the oid into cache
self._query_oids(host, [self._interface_ip_oid], cache, True)
ip_addr = ''
for k, v in six.iteritems(oid_cache):
if k.startswith(self._interface_ip_oid) and v == int(suffix[1:]):
ip_addr = k.replace(self._interface_ip_oid + ".", "")
metadata.update(ip=ip_addr)
return value
@staticmethod
def _get_auth_strategy(host):
if host.password:
auth_strategy = cmdgen.UsmUserData(host.username,
authKey=host.password)
else:
auth_strategy = cmdgen.CommunityData(host.username or 'public')
return auth_strategy
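
    # A hedged sketch of the two resulting strategies (credentials are
    # invented): snmp://user:secret@host yields SNMPv3 authentication via
    # cmdgen.UsmUserData('user', authKey='secret'), while snmp://host or
    # snmp://community@host falls back to SNMPv1/v2c via
    # cmdgen.CommunityData('public').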


@ -1,144 +0,0 @@
#
# Copyright 2013 ZHAW SoE
# Copyright 2014 Intel Corp.
#
# Authors: Lucas Graf <graflu0@students.zhaw.ch>
# Toni Zehnder <zehndton@students.zhaw.ch>
# Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Base class for plugins used by the hardware agent."""
import abc
import itertools
from oslo_log import log
from oslo_utils import netutils
import six
from ceilometer.agent import plugin_base
from ceilometer.hardware import inspector as insloader
from ceilometer.i18n import _
LOG = log.getLogger(__name__)
@six.add_metaclass(abc.ABCMeta)
class HardwarePollster(plugin_base.PollsterBase):
"""Base class for plugins that support the polling API."""
CACHE_KEY = None
IDENTIFIER = None
def __init__(self):
super(HardwarePollster, self).__init__()
self.inspectors = {}
@property
def default_discovery(self):
return 'tripleo_overcloud_nodes'
@staticmethod
def _parse_resource(res):
"""Parse resource from discovery.
Either URL can be given or dict. Dict has to contain at least
keys 'resource_id' and 'resource_url', all the dict keys will be stored
as metadata.
:param res: URL or dict containing all resource info.
:return parsed_url, resource_id, metadata: Returns parsed URL used for
SNMP query, unique identifier of the resource and metadata
of the resource.
"""
if isinstance(res, dict):
if 'resource_url' not in res or 'resource_id' not in res:
LOG.exception(_('Passed resource dict must contain keys '
'resource_id and resource_url.'))
metadata = res
parsed_url = netutils.urlsplit(res['resource_url'])
resource_id = res['resource_id']
else:
metadata = {}
parsed_url = netutils.urlsplit(res)
resource_id = res
return parsed_url, resource_id, metadata
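
    # A hedged sketch of the two forms accepted by _parse_resource above
    # (values invented):
    #
    #     _parse_resource('snmp://192.0.2.10')
    #     # -> (parsed URL, 'snmp://192.0.2.10', {})
    #
    #     _parse_resource({'resource_id': 'node-1',
    #                      'resource_url': 'snmp://192.0.2.10'})
    #     # -> (parsed URL, 'node-1', the whole dict as metadata)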
def get_samples(self, manager, cache, resources=None):
"""Return an iterable of Sample instances from polling the resources.
:param manager: The service manager invoking the plugin
:param cache: A dictionary for passing data between plugins
:param resources: end point to poll data from
"""
resources = resources or []
h_cache = cache.setdefault(self.CACHE_KEY, {})
sample_iters = []
for resource in resources:
parsed_url, res, extra_metadata = self._parse_resource(resource)
ins = self._get_inspector(parsed_url)
try:
# Call hardware inspector to poll for the data
i_cache = h_cache.setdefault(res, {})
if self.IDENTIFIER not in i_cache:
i_cache[self.IDENTIFIER] = list(ins.inspect_generic(
parsed_url,
self.IDENTIFIER,
i_cache,
extra_metadata))
# Generate samples
if i_cache[self.IDENTIFIER]:
sample_iters.append(self.generate_samples(
parsed_url,
i_cache[self.IDENTIFIER]))
except Exception as err:
LOG.exception(_('inspector call failed for %(ident)s '
'host %(host)s: %(err)s'),
dict(ident=self.IDENTIFIER,
host=parsed_url.hostname,
err=err))
return itertools.chain(*sample_iters)
def generate_samples(self, host_url, data):
"""Generate an iterable Sample from the data returned by inspector
:param host_url: host url of the endpoint
:param data: list of data returned by the corresponding inspector
"""
return (self.generate_one_sample(host_url, datum) for datum in data)
@abc.abstractmethod
def generate_one_sample(self, host_url, c_data):
"""Return one Sample.
:param host_url: host url of the endpoint
:param c_data: data returned by the inspector.inspect_generic,
tuple of (value, metadata, extra)
"""
def _get_inspector(self, parsed_url):
if parsed_url.scheme not in self.inspectors:
try:
driver = insloader.get_inspector(parsed_url)
self.inspectors[parsed_url.scheme] = driver
except Exception as err:
LOG.exception(_("Can NOT load inspector %(name)s: %(err)s"),
dict(name=parsed_url.scheme,
err=err))
raise err
return self.inspectors[parsed_url.scheme]


@ -1,50 +0,0 @@
#
# Copyright 2013 ZHAW SoE
# Copyright 2014 Intel Corp.
#
# Authors: Lucas Graf <graflu0@students.zhaw.ch>
# Toni Zehnder <zehndton@students.zhaw.ch>
# Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.hardware import plugin
from ceilometer.hardware.pollsters import util
from ceilometer import sample
class _Base(plugin.HardwarePollster):
CACHE_KEY = 'cpu'
def generate_one_sample(self, host, c_data):
value, metadata, extra = c_data
return util.make_sample_from_host(host,
name=self.IDENTIFIER,
sample_type=sample.TYPE_GAUGE,
unit='process',
volume=value,
res_metadata=metadata,
extra=extra)
class CPULoad1MinPollster(_Base):
IDENTIFIER = 'cpu.load.1min'
class CPULoad5MinPollster(_Base):
IDENTIFIER = 'cpu.load.5min'
class CPULoad15MinPollster(_Base):
IDENTIFIER = 'cpu.load.15min'


@ -1,50 +0,0 @@
#
# Copyright 2013 ZHAW SoE
# Copyright 2014 Intel Corp.
#
# Authors: Lucas Graf <graflu0@students.zhaw.ch>
# Toni Zehnder <zehndton@students.zhaw.ch>
# Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.hardware import plugin
from ceilometer.hardware.pollsters import util
from ceilometer import sample
class _Base(plugin.HardwarePollster):
CACHE_KEY = 'disk'
def generate_one_sample(self, host, c_data):
value, metadata, extra = c_data
res_id = extra.get('resource_id') or host.hostname
if metadata.get('device'):
res_id = res_id + ".%s" % metadata.get('device')
return util.make_sample_from_host(host,
name=self.IDENTIFIER,
sample_type=sample.TYPE_GAUGE,
unit='B',
volume=value,
res_metadata=metadata,
extra=extra,
resource_id=res_id)
class DiskTotalPollster(_Base):
IDENTIFIER = 'disk.size.total'
class DiskUsedPollster(_Base):
IDENTIFIER = 'disk.size.used'
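
# For illustration (values invented): with hostname 'node-1' and disk
# metadata {'device': '/dev/sda2'}, generate_one_sample() above emits a
# sample whose resource_id is 'node-1./dev/sda2', giving each disk its
# own resource.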


@ -1,62 +0,0 @@
#
# Copyright 2013 ZHAW SoE
# Copyright 2014 Intel Corp.
#
# Authors: Lucas Graf <graflu0@students.zhaw.ch>
# Toni Zehnder <zehndton@students.zhaw.ch>
# Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.hardware import plugin
from ceilometer.hardware.pollsters import util
from ceilometer import sample
class _Base(plugin.HardwarePollster):
CACHE_KEY = 'memory'
def generate_one_sample(self, host, c_data):
value, metadata, extra = c_data
return util.make_sample_from_host(host,
name=self.IDENTIFIER,
sample_type=sample.TYPE_GAUGE,
unit='B',
volume=value,
res_metadata=metadata,
extra=extra)
class MemoryTotalPollster(_Base):
IDENTIFIER = 'memory.total'
class MemoryUsedPollster(_Base):
IDENTIFIER = 'memory.used'
class MemorySwapTotalPollster(_Base):
IDENTIFIER = 'memory.swap.total'
class MemorySwapAvailPollster(_Base):
IDENTIFIER = 'memory.swap.avail'
class MemoryBufferPollster(_Base):
IDENTIFIER = 'memory.buffer'
class MemoryCachedPollster(_Base):
IDENTIFIER = 'memory.cached'


@ -1,57 +0,0 @@
#
# Copyright 2013 ZHAW SoE
# Copyright 2014 Intel Corp.
#
# Authors: Lucas Graf <graflu0@students.zhaw.ch>
# Toni Zehnder <zehndton@students.zhaw.ch>
# Lianhao Lu <lianhao.lu@intel.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.hardware import plugin
from ceilometer.hardware.pollsters import util
from ceilometer import sample
class _Base(plugin.HardwarePollster):
CACHE_KEY = 'nic'
def generate_one_sample(self, host, c_data):
value, metadata, extra = c_data
res_id = extra.get('resource_id') or host.hostname
if metadata.get('name'):
res_id = res_id + ".%s" % metadata.get('name')
return util.make_sample_from_host(host,
name=self.IDENTIFIER,
sample_type=sample.TYPE_CUMULATIVE,
unit=self.unit,
volume=value,
res_metadata=metadata,
extra=extra,
resource_id=res_id)
class IncomingBytesPollster(_Base):
IDENTIFIER = 'network.incoming.bytes'
unit = 'B'
class OutgoingBytesPollster(_Base):
IDENTIFIER = 'network.outgoing.bytes'
unit = 'B'
class OutgoingErrorsPollster(_Base):
IDENTIFIER = 'network.outgoing.errors'
unit = 'packet'


@ -1,38 +0,0 @@
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from ceilometer.hardware import plugin
from ceilometer.hardware.pollsters import util
from ceilometer import sample
class _Base(plugin.HardwarePollster):
CACHE_KEY = 'network'
def generate_one_sample(self, host, c_data):
value, metadata, extra = c_data
return util.make_sample_from_host(host,
name=self.IDENTIFIER,
sample_type=sample.TYPE_CUMULATIVE,
unit='datagrams',
volume=value,
res_metadata=metadata,
extra=extra)
class NetworkAggregatedIPOutRequests(_Base):
IDENTIFIER = 'network.ip.outgoing.datagrams'
class NetworkAggregatedIPInReceives(_Base):
IDENTIFIER = 'network.ip.incoming.datagrams'

Some files were not shown because too many files have changed in this diff.