Moving telegraf-plugins to drop them properly

When playbook-influx-telegraf.yml runs, it uses roles from mgrzybek
openstack-ansible-telegraf repo. Playbooks from that repo load the
plugin scripts from different directories by reading a source path.

Change-Id: Ib1ca9f60ad5e686790b56e1c66ab53ed9cc490b7
This commit is contained in:
Ramon Orru 2018-03-08 14:20:02 +01:00
parent 1b1e2853d1
commit 2113b36bf0
5 changed files with 10 additions and 520 deletions

View File

@ -14,7 +14,7 @@
# limitations under the License. # limitations under the License.
- name: Deploy telegraf - name: Deploy telegraf
hosts: "all" hosts: "all:!elk_all"
gather_facts: true gather_facts: true
user: root user: root
roles: roles:
@ -31,20 +31,29 @@
telegraf_openstack_scripts: telegraf_openstack_scripts:
ironic: ironic:
plugin_name: "ironic_nodes.py" plugin_name: "ironic_nodes.py"
plugin_source_path: "scripts/ironic_nodes.py"
command: command:
- "python /opt/telegraf/ironic_nodes.py" - "python /opt/telegraf/ironic_nodes.py"
sudoers_entry:
- "{{ telegraf_openstack_scripts_path }}/ironic_nodes.py"
group: "{{ groups['utility_all'][0] }}" group: "{{ groups['utility_all'][0] }}"
when_group: "{{ (groups['ironic_api'] | default([]) | length) > 0 }}" when_group: "{{ (groups['ironic_api'] | default([]) | length) > 0 }}"
kvm: kvm:
plugin_name: "kvm_virsh.py" plugin_name: "kvm_virsh.py"
plugin_source_path: "scripts/kvm_virsh.py"
command: command:
- "python /opt/telegraf/kvm_virsh.py" - "python /opt/telegraf/kvm_virsh.py"
sudoers_entry:
- "{{ telegraf_openstack_scripts_path }}/kvm_virsh.py"
group: "{{ groups['nova_compute'] }}" group: "{{ groups['nova_compute'] }}"
when_group: "{{ (groups['nova_compute'] | default([]) | length) > 0 and (nova_virt_type | default('qemu') in ['kvm', 'qemu']) }}" when_group: "{{ (groups['nova_compute'] | default([]) | length) > 0 and (nova_virt_type | default('qemu') in ['kvm', 'qemu']) }}"
cinder_pools_usage: cinder_pools_usage:
plugin_name: "cinder_pools_usage.py" plugin_name: "cinder_pools_usage.py"
plugin_source_path: "scripts/cinder_pools_usage.py"
command: command:
- "python /opt/telegraf/cinder_pools_usage.py" - "python /opt/telegraf/cinder_pools_usage.py"
sudoers_entry:
- "{{ telegraf_openstack_scripts_path }}/cinder_pools_usage.py"
group: "{{ groups['utility_all'][0] }}" group: "{{ groups['utility_all'][0] }}"
when_group: "{{ (groups['cinder_volumes'] | default([]) | length) > 0 }}" when_group: "{{ (groups['cinder_volumes'] | default([]) | length) > 0 }}"
telegraf_output_influxdb_targets: telegraf_output_influxdb_targets:

View File

@ -1,85 +0,0 @@
#!/usr/bin/env python
# Copyright 2016, Intel US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This script calls the cinder API and gathers the volume group capacity
# information and outputs to Influx Protocol Line format
from openstack import connection as os_conn
# Keystone credentials used to open the OpenStack API connection.
# The '{{ ... }}' placeholders are Jinja2 variables rendered by Ansible
# when this script is templated onto the target host.
OS_AUTH_ARGS = {
    'auth_url': '{{ keystone_service_internalurl }}',
    'project_name': '{{ keystone_admin_tenant_name }}',
    'user_domain_name': '{{ openrc_os_domain_name }}',
    'project_domain_name': '{{ openrc_os_domain_name }}',
    'username': '{{ keystone_admin_user_name }}',
    'password': '{{ keystone_auth_admin_password }}',
}
# Module-level cache holding the lazily created connection (see _connect).
OS_CONNECTION = {'conn': None}
def _connect():
    """Return a cached OpenStack connection, creating it on first use."""
    conn = OS_CONNECTION['conn']
    if not conn:
        conn = OS_CONNECTION['conn'] = os_conn.Connection(**OS_AUTH_ARGS)
    return conn
def main():
    """Query the cinder scheduler pool stats and print them in InfluxDB
    line-protocol format.

    Emits one 'cinder,pool=<backend>' measurement per backend with the
    free-capacity percentage of each pool member, plus a 'cinder_totals'
    summary record.
    """
    pool_data = dict()
    conn = _connect()
    url = conn.block_store.session.get_endpoint(
        interface='internal',
        service_type='volume'
    )
    block_store_data_raw = conn.block_store.session.get(
        url + '/scheduler-stats/get_pools?detail=True'
    )
    block_store_data = block_store_data_raw.json()
    total_capacity_gb = 0
    free_capacity_gb = 0
    for item in block_store_data.get('pools', []):
        name = item.get('name')
        if not name:
            continue
        cap = item['capabilities']
        _total_capacity_gb = float(cap.get('total_capacity_gb', 0))
        _free_capacity_gb = float(cap.get('free_capacity_gb', 0))
        pool_name = cap.get('pool_name')
        pool = pool_data.setdefault(pool_name, dict())
        # Guard against backends that report zero total capacity; the
        # original unguarded division crashed with ZeroDivisionError.
        if _total_capacity_gb:
            pool[name] = 100 * _free_capacity_gb / _total_capacity_gb
        else:
            pool[name] = 0.0
        free_capacity_gb += _free_capacity_gb
        total_capacity_gb += _total_capacity_gb
    finalized_data = dict()
    for key, value in pool_data.items():
        data = finalized_data['cinder,pool=%s' % key] = list()
        for k, v in value.items():
            data.append('%s=%s' % (k.replace(' ', '_'), v))
    for key, value in finalized_data.items():
        print('%s %s' % (key, ','.join(value)))
    # Guard the totals division as well (no pools -> zero capacity).
    if total_capacity_gb:
        tup = 100 * free_capacity_gb / total_capacity_gb
    else:
        tup = 0.0
    # NOTE(review): this ratio is free/total (percent FREE) but the field
    # is named 'percent_used' -- confirm intended semantics with the
    # dashboards consuming this metric before renaming.
    totals = 'cinder_totals cinder_total_percent_used=%s' % tup
    print(totals)


if __name__ == '__main__':
    main()

View File

@ -1,202 +0,0 @@
#!/bin/python
#
# Copyright 2016, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
import dbm
import json
import os
import tempfile
import MySQLdb as mysql
from MySQLdb.constants import FIELD_TYPE
from openstack import connection as os_conn
from openstack import exceptions as os_exp
# Keystone credentials for the OpenStack API; '{{ ... }}' placeholders
# are Jinja2 variables rendered by Ansible at deploy time.
OS_AUTH_ARGS = {
    'auth_url': '{{ keystone_service_internalurl }}',
    'project_name': '{{ keystone_admin_tenant_name }}',
    'user_domain_name': '{{ openrc_os_domain_name }}',
    'project_domain_name': '{{ openrc_os_domain_name }}',
    'username': '{{ keystone_admin_user_name }}',
    'password': '{{ keystone_auth_admin_password }}',
}
# Lazily populated connection cache used by _connect().
OS_CONNECTION = {'conn': None}
def line_return(collection, metric_name):
    """Render ``collection`` as one InfluxDB line-protocol record.

    Spaces in keys are replaced with underscores; fields are emitted as
    comma-separated ``key=value`` pairs after the measurement name.
    """
    fields = ','.join(
        '%s=%s' % (key.replace(' ', '_'), value)
        for key, value in collection.items()
    )
    return '%s %s' % (metric_name, fields)
def run_query(db_name, query):
    """Run ``query`` against MySQL database ``db_name`` and return all rows.

    Credentials are read from ~/.my.cnf.  LONG columns are converted to
    Python ints.  Aborts the process if the query fails.
    """
    db = mysql.connect(
        db=db_name,
        read_default_file=os.path.expanduser('~/.my.cnf'),
        conv={FIELD_TYPE.LONG: int}
    )
    try:
        db.query(query)
        output = db.store_result()
    except mysql.OperationalError:
        # BUG FIX: the original constructed SystemExit without raising it,
        # silently swallowing the failure and returning None; actually
        # raise so a failed query aborts the plugin run.
        raise SystemExit('DB Query failed')
    else:
        return output.fetch_row(maxrows=0)
    finally:
        db.close()
def _connect():
    """Create the OpenStack API connection once and reuse it afterwards."""
    if not OS_CONNECTION['conn']:
        OS_CONNECTION['conn'] = os_conn.Connection(**OS_AUTH_ARGS)
    return OS_CONNECTION['conn']
def consumer_db(consumer_id):
    """Resolve a keystone project id to its name, with an on-disk cache.

    Looks the id up in a dbm cache file under the system temp dir first;
    on a miss it queries keystone and stores the result.  Returns
    'UNKNOWN' when keystone has no such project.
    """
    # 'c' flag: open the cache read/write, creating the file if absent.
    cdb = dbm.open(os.path.join(tempfile.gettempdir(), 'cdb.dbm'), 'c')
    try:
        project_name = cdb.get(consumer_id)
        if not project_name:
            conn = _connect()
            project_info = conn.identity.get_project(consumer_id)
            project_name = cdb[consumer_id] = project_info['name']
    except os_exp.ResourceNotFound:
        return 'UNKNOWN'
    else:
        # NOTE(review): cache hits come back from dbm as bytes while
        # fresh keystone lookups return str -- callers appear to treat
        # the value as an opaque counter key; confirm type consistency
        # does not matter downstream.
        return project_name
    finally:
        cdb.close()
def consumer_limits(consumer_id):
    """Return the instance quota for the given keystone project id."""
    session = _connect().compute.session
    endpoint = session.get_endpoint(
        interface='internal',
        service_type='compute'
    )
    response = session.get(endpoint + '/os-quota-sets/' + consumer_id)
    return response.json()['quota_set']['instances']
def main():
    """Collect ironic node statistics and print them as InfluxDB
    line-protocol records.

    Reads node data straight from the ironic database, correlates
    consumed nodes with their nova projects, and emits counters for
    flavors, provision states, usage and per-consumer quota limits.
    """
    return_data = []
    system_types = collections.Counter()        # nodes per system_type
    system_types_used = collections.Counter()   # consumed nodes per type
    system_states = collections.Counter()       # nodes per provision state
    system_used = collections.Counter()         # total / in_use / available
    system_consumers = collections.Counter()    # nodes per project name
    system_consumer_limits = dict()             # project name -> quota
    system_consumer_map = dict()                # project id -> project name
    datas = run_query(
        db_name='{{ ironic_galera_database|default("ironic") }}',
        query="""select instance_uuid,properties,provision_state from nodes"""
    )
    for data in datas:
        # Row layout: (instance_uuid, properties JSON blob, provision_state)
        x = json.loads(data[1])
        system_states[data[-1]] += 1
        node_consumed = data[0]
        system_used['total'] += 1
        if node_consumed:
            system_used['in_use'] += 1
        else:
            system_used['available'] += 1
        # NOTE(review): assumes every node's properties JSON contains a
        # 'capabilities' string -- a node without one raises KeyError.
        for capability in x['capabilities'].split(','):
            if capability.startswith('system_type'):
                system_type = capability.split(':')[-1]
                system_types[system_type] += 1
                if node_consumed:
                    system_types_used[system_type] += 1
                    # Map the consuming nova instance back to its project.
                    _query = (
                        """select project_id from instances where uuid='%s'"""
                    ) % node_consumed
                    _project_id = run_query(
                        db_name='{{ nova_galera_database|default("nova") }}',
                        query=_query
                    )
                    project_id = _project_id[0][0]
                    project_name = consumer_db(project_id)
                    system_consumer_map[project_id] = project_name
                    system_consumers[project_name] += 1
                # Only the first system_type capability counts per node.
                break
    if system_consumers:
        # Fetch each consumer's instance quota and the cloud-wide total.
        for key, value in system_consumer_map.items():
            system_consumer_limits[value] = consumer_limits(key)
        system_used['total_reserved'] = sum(system_consumer_limits.values())
    return_data.append(
        line_return(
            collection=system_types,
            metric_name='ironic_node_flavors'
        )
    )
    return_data.append(
        line_return(
            collection=system_types_used,
            metric_name='ironic_node_flavors_used'
        )
    )
    return_data.append(
        line_return(
            collection=system_states,
            metric_name='ironic_node_states'
        )
    )
    return_data.append(
        line_return(
            collection=system_used,
            metric_name='ironic_nodes_used'
        )
    )
    return_data.append(
        line_return(
            collection=system_consumers,
            metric_name='ironic_consumers'
        )
    )
    return_data.append(
        line_return(
            collection=system_consumer_limits,
            metric_name='ironic_consumer_limits'
        )
    )
    # One line-protocol record per metric family.
    for item in return_data:
        print(item)


if __name__ == '__main__':
    main()

View File

@ -1,27 +0,0 @@
#!/usr/bin/env python
import libvirt
import socket

# Telegraf exec plugin: report KVM domain and vCPU counts for this
# hypervisor in InfluxDB line-protocol format ('kvm ...' measurement).
return_data = dict()
# Read-only connection to the local libvirt daemon.
conn = libvirt.openReadOnly()
try:
    domains = conn.listDomainsID()
    return_data['kvm_vms'] = len(domains)
    # getCPUMap()[0] is the number of host CPUs.
    return_data['kvm_total_vcpus'] = conn.getCPUMap()[0]
    return_data['kvm_scheduled_vcpus'] = 0
    for domain in domains:
        return_data['kvm_scheduled_vcpus'] += conn.lookupByID(
            domain
        ).maxVcpus()
    # NOTE(review): hash() of the FQDN is only stable across runs when
    # string hash randomization is disabled (Python 2, or a fixed
    # PYTHONHASHSEED) -- confirm, or this host id changes every run.
    return_data['kvm_host_id'] = abs(hash(socket.getfqdn()))
except Exception as exp:
    raise SystemExit('Plugin failure -- Reason: "%s"' % exp)
else:
    # Fold the collected values into a single 'kvm ' measurement line;
    # the for/else rstrip drops the trailing comma.
    line_data = 'kvm '
    for key, value in return_data.items():
        line_data += '%s=%s,' % (key.replace(' ', '_'), value)
    else:
        line_data = line_data.rstrip(',')
    print(line_data)
finally:
    conn.close()

View File

@ -1,205 +0,0 @@
#!/bin/python
#
# Copyright 2016, Rackspace US, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import collections
from openstack import connection as os_conn
# Keystone credentials for the OpenStack API; '{{ ... }}' placeholders
# are Jinja2 variables rendered by Ansible at deploy time.
OS_AUTH_ARGS = {
    'auth_url': '{{ keystone_service_internalurl }}',
    'project_name': '{{ keystone_admin_tenant_name }}',
    'user_domain_name': '{{ openrc_os_domain_name }}',
    'project_domain_name': '{{ openrc_os_domain_name }}',
    'username': '{{ keystone_admin_user_name }}',
    'password': '{{ keystone_auth_admin_password }}',
}
# Lazily populated connection cache used by _connect().
OS_CONNECTION = {'conn': None}
def line_return(collection, metric_name):
    """Format ``collection`` as one InfluxDB line-protocol record.

    Keys have spaces replaced with underscores and are joined as
    comma-separated ``key=value`` fields after the measurement name.
    """
    parts = []
    for key, value in collection.items():
        parts.append('%s=%s' % (key.replace(' ', '_'), value))
    return '%s %s' % (metric_name, ','.join(parts))
def _connect():
    """Return the process-wide OpenStack connection, creating it lazily."""
    if not OS_CONNECTION['conn']:
        OS_CONNECTION['conn'] = os_conn.Connection(**OS_AUTH_ARGS)
    return OS_CONNECTION['conn']
def get_consumers():
    """Return all keystone projects except heat stack user projects.

    Heat creates an internal project per stack user; those are filtered
    out by their well-known description string.
    """
    conn = _connect()
    _consumers = list()
    for project in conn.identity.projects():
        # Guard: 'description' can be None/empty on some deployments; the
        # original unconditional .lower() raised AttributeError on None.
        description = (project['description'] or '').lower()
        if description != 'heat stack user project':
            _consumers.append(project)
    return _consumers
def get_consumer_limits(consumer_id):
    """Fetch the full quota set for a project from the compute API."""
    session = _connect().compute.session
    endpoint = session.get_endpoint(
        interface='internal',
        service_type='compute'
    )
    response = session.get(endpoint + '/os-quota-sets/' + consumer_id)
    return response.json()['quota_set']
def get_consumer_usage():
    """List servers across every tenant (capped at 5000 results)."""
    conn = _connect()
    return conn.compute.servers(details=True, all_tenants=True, limit=5000)
def get_flavors():
    """Build a cache mapping flavor id -> {'ram', 'cores', 'disk'}."""
    conn = _connect()
    flavor_cache = dict()
    for flavor in conn.compute.flavors():
        flavor_cache[flavor['id']] = {
            'ram': flavor['ram'],
            'cores': flavor['vcpus'],
            'disk': flavor['disk'],
        }
    return flavor_cache
def main():
    """Gather per-project nova quota and usage counters and print them
    as InfluxDB line-protocol records."""
    return_data = list()
    consumer_quota_instance = dict()   # project name -> instance quota
    consumer_quota_cores = dict()      # project name -> vcpu quota
    consumer_quota_ram = dict()        # project name -> ram quota
    consumer_used_instances = collections.Counter()
    consumer_used_cores = collections.Counter()
    consumer_used_ram = collections.Counter()
    consumer_used_disk = collections.Counter()
    consumer_quota_totals = dict()     # cloud-wide rollups
    flavor_cache = get_flavors()
    consumer_id_cache = dict()         # project id -> project name
    # Record each project's quotas.
    for consumer in get_consumers():
        consumer_name = consumer['name']
        consumer_id = consumer['id']
        _quota = get_consumer_limits(consumer_id)
        consumer_id_cache[consumer_id] = consumer_name
        consumer_quota_instance[consumer_name] = int(_quota['instances'])
        consumer_quota_cores[consumer_name] = int(_quota['cores'])
        consumer_quota_ram[consumer_name] = int(_quota['ram'])
    # Accumulate actual usage per project, sizing each server by its
    # flavor's cores/ram/disk.
    for used_instance in get_consumer_usage():
        # NOTE(review): raises KeyError if a server belongs to a project
        # that get_consumers() filtered out (e.g. heat stack projects) --
        # confirm that combination cannot occur here.
        consumer_name = consumer_id_cache[used_instance['tenant_id']]
        consumer_used_instances[consumer_name] += 1
        consumer_used_cores[consumer_name] += \
            int(flavor_cache[used_instance['flavor']['id']]['cores'])
        consumer_used_ram[consumer_name] += \
            int(flavor_cache[used_instance['flavor']['id']]['ram'])
        consumer_used_disk[consumer_name] += \
            int(flavor_cache[used_instance['flavor']['id']]['disk'])
    # Cloud-wide totals across all projects.
    consumer_quota_totals['total_quota_instance'] = sum(
        consumer_quota_instance.values()
    )
    consumer_quota_totals['total_quota_cores'] = sum(
        consumer_quota_cores.values()
    )
    consumer_quota_totals['total_quota_ram'] = sum(
        consumer_quota_ram.values()
    )
    consumer_quota_totals['total_used_instances'] = sum(
        consumer_used_instances.values()
    )
    consumer_quota_totals['total_used_cores'] = sum(
        consumer_used_cores.values()
    )
    consumer_quota_totals['total_used_ram'] = sum(
        consumer_used_ram.values()
    )
    consumer_quota_totals['total_used_disk'] = sum(
        consumer_used_disk.values()
    )
    return_data.append(
        line_return(
            collection=consumer_quota_instance,
            metric_name='consumer_quota_instance'
        )
    )
    return_data.append(
        line_return(
            collection=consumer_quota_cores,
            metric_name='consumer_quota_cores'
        )
    )
    return_data.append(
        line_return(
            collection=consumer_quota_ram,
            metric_name='consumer_quota_ram'
        )
    )
    return_data.append(
        line_return(
            collection=consumer_used_instances,
            metric_name='consumer_used_instances'
        )
    )
    return_data.append(
        line_return(
            collection=consumer_used_cores,
            metric_name='consumer_used_cores'
        )
    )
    return_data.append(
        line_return(
            collection=consumer_used_ram,
            metric_name='consumer_used_ram'
        )
    )
    return_data.append(
        line_return(
            collection=consumer_used_disk,
            metric_name='consumer_used_disk'
        )
    )
    return_data.append(
        line_return(
            collection=consumer_quota_totals,
            metric_name='consumer_quota_totals'
        )
    )
    # One line-protocol record per metric family.
    for item in return_data:
        print(item)


if __name__ == '__main__':
    main()