Use graduated oslo.policy
This change:

- Adds oslo.policy to the requirements list
- Accounts for changes in Enforcer initialization
- Accounts for changes to config options
- Removes incubated version of oslo.policy
- Updates the in-tree etc/config files

UpgradeImpact

Partially Implements Blueprint: graduate-policy

Change-Id: I5acb1e0f809098991f05ca3b6d78d4d88d98f2db
parent cafbe9e69d
commit cb7d5a4795
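The headline API change in the graduated library is that the Enforcer is now bound to a configuration object and reads its options from the [oslo_policy] group instead of [DEFAULT]. A minimal sketch of the difference (illustrative, not Glance code; it assumes only oslo.config and the oslo.policy version pinned below):

    from oslo_config import cfg
    from oslo_policy import policy

    CONF = cfg.CONF

    # incubated module:  policy.Enforcer(overwrite=False)
    # graduated library: the config object is the first positional argument
    enforcer = policy.Enforcer(CONF, overwrite=False)

    # Constructing the Enforcer registers policy_file, policy_default_rule
    # and policy_dirs on CONF under the oslo_policy group.
    print(CONF.oslo_policy.policy_file)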
@@ -301,10 +301,13 @@ image_cache_dir = /var/lib/glance/image-cache/
 
 # =============== Policy Options ==================================
 
+[oslo_policy]
 # The JSON file that defines policies.
+# Deprecated group/name - [DEFAULT]/policy_file
 #policy_file = policy.json
 
 # Default rule. Enforced when a requested rule is not found.
+# Deprecated group/name - [DEFAULT]/policy_default_rule
 #policy_default_rule = default
 
 # Directories where policy configuration files are stored.
@@ -312,6 +315,7 @@ image_cache_dir = /var/lib/glance/image-cache/
 # defined by the config_dir option, or absolute paths.
 # The file defined by policy_file must exist for these
 # directories to be searched.
+# Deprecated group/name - [DEFAULT]/policy_dirs
 #policy_dirs = policy.d
 
 # =============== Database Options =================================
@@ -247,10 +247,13 @@ s3_store_create_bucket_on_put = False
 
 # =============== Policy Options ==============================
 
+[oslo_policy]
 # The JSON file that defines policies.
+# Deprecated group/name - [DEFAULT]/policy_file
 #policy_file = policy.json
 
 # Default rule. Enforced when a requested rule is not found.
+# Deprecated group/name - [DEFAULT]/policy_default_rule
 #policy_default_rule = default
 
 # Directories where policy configuration files are stored.
@@ -258,4 +261,5 @@ s3_store_create_bucket_on_put = False
 # defined by the config_dir option, or absolute paths.
 # The file defined by policy_file must exist for these
 # directories to be searched.
+# Deprecated group/name - [DEFAULT]/policy_dirs
 #policy_dirs = policy.d
@@ -127,10 +127,13 @@ qpid_tcp_nodelay = True
 
 # =============== Policy Options ==============================
 
+[oslo_policy]
 # The JSON file that defines policies.
+# Deprecated group/name - [DEFAULT]/policy_file
 #policy_file = policy.json
 
 # Default rule. Enforced when a requested rule is not found.
+# Deprecated group/name - [DEFAULT]/policy_default_rule
 #policy_default_rule = default
 
 # Directories where policy configuration files are stored.
@@ -138,6 +141,7 @@ qpid_tcp_nodelay = True
 # defined by the config_dir option, or absolute paths.
 # The file defined by policy_file must exist for these
 # directories to be searched.
+# Deprecated group/name - [DEFAULT]/policy_dirs
 #policy_dirs = policy.d
 
 # ================= Database Options ==========================
@@ -6,5 +6,6 @@ namespace = oslo.concurrency
 namespace = oslo.messaging
 namespace = oslo.db
 namespace = oslo.db.concurrency
+namespace = oslo.policy
 namespace = keystoneclient.middleware.auth_token
 namespace = oslo.log
@@ -2,3 +2,4 @@
 output_file = etc/glance-cache.conf.sample
 namespace = glance.cache
 namespace = oslo.log
+namespace = oslo.policy
@@ -5,5 +5,6 @@ namespace = glance.store
 namespace = oslo.messaging
 namespace = oslo.db
 namespace = oslo.db.concurrency
+namespace = oslo.policy
 namespace = keystoneclient.middleware.auth_token
 namespace = oslo.log
@@ -5,3 +5,4 @@ namespace = oslo.concurrency
 namespace = oslo.db
 namespace = oslo.db.concurrency
 namespace = oslo.log
+namespace = oslo.policy
@@ -20,21 +20,21 @@ import copy
 
 from oslo_config import cfg
 from oslo_log import log as logging
+from oslo_policy import policy
 
 from glance.common import exception
 import glance.domain.proxy
 from glance import i18n
-from glance.openstack.common import policy
 
 
 LOG = logging.getLogger(__name__)
 CONF = cfg.CONF
 
-DEFAULT_RULES = {
-    'context_is_admin': policy.RoleCheck('role', 'admin'),
-    'default': policy.TrueCheck(),
-    'manage_image_cache': policy.RoleCheck('role', 'admin'),
-}
+DEFAULT_RULES = policy.Rules.from_dict({
+    'context_is_admin': 'role:admin',
+    'default': '@',
+    'manage_image_cache': 'role:admin',
+})
 
 _ = i18n._
 _LI = i18n._LI
@@ -45,11 +45,11 @@ class Enforcer(policy.Enforcer):
     """Responsible for loading and enforcing rules"""
 
     def __init__(self):
-        if CONF.find_file(CONF.policy_file):
+        if CONF.find_file(CONF.oslo_policy.policy_file):
             kwargs = dict(rules=None, use_conf=True)
         else:
             kwargs = dict(rules=DEFAULT_RULES, use_conf=False)
-        super(Enforcer, self).__init__(overwrite=False, **kwargs)
+        super(Enforcer, self).__init__(CONF, overwrite=False, **kwargs)
 
     def add_rules(self, rules):
         """Add new rules to the Rules object"""
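With the graduated library the default rules are plain policy-language strings ('role:admin', '@' for "always allow") parsed by oslo.policy itself, rather than hand-built RoleCheck/TrueCheck objects. A small self-contained sketch of how such rules are evaluated (illustrative only; the rule names match DEFAULT_RULES above, everything else is a standalone example rather than Glance code):

    from oslo_config import cfg
    from oslo_policy import policy

    CONF = cfg.CONF
    enforcer = policy.Enforcer(
        CONF,
        rules=policy.Rules.from_dict({
            'context_is_admin': 'role:admin',
            'default': '@',
        }),
        use_conf=False)

    # 'role:admin' matches against the 'roles' entry in the credentials dict.
    assert enforcer.enforce('context_is_admin', {}, {'roles': ['admin']})
    assert not enforcer.enforce('context_is_admin', {}, {'roles': ['reader']})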
@@ -27,6 +27,7 @@ import tempfile
 
 from oslo_concurrency import lockutils
 from oslo_config import cfg
+from oslo_policy import policy
 from paste import deploy
 
 from glance import i18n
@@ -180,6 +181,7 @@ CONF.register_opts(paste_deploy_opts, group='paste_deploy')
 CONF.register_opts(image_format_opts, group='image_format')
 CONF.register_opts(task_opts, group='task')
 CONF.register_opts(common_opts)
+policy.Enforcer(CONF)
 
 
 def parse_args(args=None, usage=None, default_config_files=None):
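The bare policy.Enforcer(CONF) call at import time looks odd in isolation; presumably it is there for its side effect of registering the [oslo_policy] option group on Glance's global CONF so that later lookups resolve. A hedged two-line illustration of that effect:

    policy.Enforcer(CONF)          # registers the oslo_policy option group on CONF
    CONF.oslo_policy.policy_file   # -> 'policy.json' unless overridden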
@@ -21,11 +21,11 @@ except ImportError:
 
 from oslo_config import cfg
 from oslo_log import log as logging
+from oslo_policy import policy
 
 import glance.api.policy
 from glance.common import exception
 from glance import i18n
-from glance.openstack.common import policy
 
 # NOTE(bourke): The default dict_type is collections.OrderedDict in py27, but
 # we must set manually for compatibility with py26
@@ -164,8 +164,9 @@ class PropertyRules(object):
         """
         rule = "rule:%s" % rule
         rule_name = "%s:%s" % (property_exp, action)
-        rule_dict = {}
-        rule_dict[rule_name] = policy.parse_rule(rule)
+        rule_dict = policy.Rules.from_dict({
+            rule_name: rule
+        })
         self.policy_enforcer.add_rules(rule_dict)
 
     def _check_policy(self, property_exp, action, context):
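Property-protection rules follow the same pattern: instead of running the incubated parse_rule() on each entry, a plain string rule is handed to policy.Rules.from_dict() and pushed into the enforcer. A sketch with a hypothetical property expression (the '^x_owner_.*:create' key and the bare policy_enforcer variable are made up for illustration; in the diff this happens inside PropertyRules.add_rule against self.policy_enforcer):

    rule_dict = policy.Rules.from_dict({
        '^x_owner_.*:create': 'rule:context_is_admin',
    })
    policy_enforcer.add_rules(rule_dict)  # glance.api.policy.Enforcer.add_rules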
@@ -1,963 +0,0 @@
(entire file removed)

The deleted module is the in-tree oslo-incubator copy of the common policy engine (glance.openstack.common.policy, 963 lines). It contained the module docstring describing the list-of-lists and policy-language rule syntaxes, the policy_file / policy_default_rule / policy_dirs config options, the Rules and Enforcer classes, the BaseCheck / FalseCheck / TrueCheck / Check / NotCheck / AndCheck / OrCheck check tree, the tokenizer and ParseState-based parser behind parse_rule(), and the registered rule / role / http / generic check classes. The graduated oslo.policy library now provides all of this.
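Everything the removed module provided now comes from oslo.policy, usually via policy-language strings rather than explicit check objects. A rough, hedged mapping (the 'manage_image_cache' rule comes from the diff; the 'always_allow'/'always_deny' names are invented for illustration):

    from oslo_config import cfg
    from oslo_policy import policy

    # incubated: rule_dict[name] = parse_rule('role:admin')
    rules = policy.Rules.from_dict({'manage_image_cache': 'role:admin'})

    # incubated: TrueCheck() / FalseCheck() -> '@' always allows, '!' always rejects
    specials = policy.Rules.from_dict({'always_allow': '@', 'always_deny': '!'})

    # incubated: Enforcer(rules=..., use_conf=..., overwrite=...)
    # graduated: same keywords, plus the config object up front
    enforcer = policy.Enforcer(cfg.CONF, rules=rules, use_conf=False)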
@@ -34,7 +34,6 @@ import glance.common.wsgi
 import glance.image_cache
 import glance.image_cache.drivers.sqlite
 import glance.notifier
-import glance.openstack.common.policy
 import glance.registry
 import glance.registry.client
 import glance.registry.client.v1.api
@@ -59,7 +58,6 @@ _api_opts = [
         glance.registry.client.registry_client_ctx_opts,
         glance.registry.client.registry_client_opts,
         glance.registry.client.v1.api.registry_client_ctx_opts,
-        glance.openstack.common.policy.policy_opts,
         glance.scrubber.scrubber_opts))),
     ('image_format', glance.common.config.image_format_opts),
     ('task', glance.common.config.task_opts),
@@ -73,14 +71,12 @@ _registry_opts = [
         glance.common.config.common_opts,
         glance.common.wsgi.bind_opts,
         glance.common.wsgi.socket_opts,
-        glance.common.wsgi.eventlet_opts,
-        glance.openstack.common.policy.policy_opts))),
+        glance.common.wsgi.eventlet_opts))),
     ('paste_deploy', glance.common.config.paste_deploy_opts)
 ]
 _scrubber_opts = [
     (None, list(itertools.chain(
         glance.common.config.common_opts,
-        glance.openstack.common.policy.policy_opts,
         glance.scrubber.scrubber_opts,
         glance.scrubber.scrubber_cmd_opts,
         glance.scrubber.scrubber_cmd_cli_opts,
@@ -90,7 +86,6 @@ _scrubber_opts = [
 _cache_opts = [
     (None, list(itertools.chain(
         glance.common.config.common_opts,
-        glance.openstack.common.policy.policy_opts,
         glance.image_cache.drivers.sqlite.sqlite_opts,
         glance.image_cache.image_cache_opts,
         glance.registry.registry_addr_opts,
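The policy options drop out of Glance's own option listing because the graduated library advertises them itself through an "oslo.policy" entry point for oslo-config-generator, which is why the generator configs earlier in this diff gain a "namespace = oslo.policy" line. A hedged sketch of what that entry point exposes (the oslo_policy.opts module path is assumed from the library's layout; verify against the installed version):

    from oslo_policy import opts as policy_opts

    for group, options in policy_opts.list_opts():
        print(group, sorted(opt.name for opt in options))
    # Expected, roughly:
    # oslo_policy ['policy_default_rule', 'policy_dirs', 'policy_file']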
@@ -331,8 +331,6 @@ scrub_time = %(scrub_time)s
 scrubber_datadir = %(scrubber_datadir)s
 image_cache_dir = %(image_cache_dir)s
 image_cache_driver = %(image_cache_driver)s
-policy_file = %(policy_file)s
-policy_default_rule = %(policy_default_rule)s
 data_api = %(data_api)s
 sql_connection = %(sql_connection)s
 show_image_direct_url = %(show_image_direct_url)s
@@ -348,6 +346,9 @@ image_property_quota=%(image_property_quota)s
 image_tag_quota=%(image_tag_quota)s
 image_location_quota=%(image_location_quota)s
 location_strategy=%(location_strategy)s
+[oslo_policy]
+policy_file = %(policy_file)s
+policy_default_rule = %(policy_default_rule)s
 [paste_deploy]
 flavor = %(deployment_flavor)s
 [store_type_location_strategy]
@@ -459,6 +460,7 @@ enable_v2_registry = %(enable_v2_registry)s
 workers = %(workers)s
 user_storage_quota = %(user_storage_quota)s
 metadata_encryption_key = %(metadata_encryption_key)s
+[oslo_policy]
 policy_file = %(policy_file)s
 policy_default_rule = %(policy_default_rule)s
 [paste_deploy]
@@ -527,11 +529,12 @@ scrubber_datadir = %(scrubber_datadir)s
 registry_host = 127.0.0.1
 registry_port = %(registry_port)s
 metadata_encryption_key = %(metadata_encryption_key)s
-policy_file = %(policy_file)s
-policy_default_rule = %(policy_default_rule)s
 lock_path = %(lock_path)s
 sql_connection = %(sql_connection)s
 sql_idle_timeout = 3600
+[oslo_policy]
+policy_file = %(policy_file)s
+policy_default_rule = %(policy_default_rule)s
 """
 
     def start(self, expect_exit=True, expected_exitcode=0, **kwargs):
@@ -85,7 +85,7 @@ class FunctionalInitWrapper(functional.FunctionalTest):
 
     def setUp(self):
         super(FunctionalInitWrapper, self).setUp()
-        self.config(policy_file=self.policy_file)
+        self.config(policy_file=self.policy_file, group='oslo_policy')
 
 
 class TestDriver(test_utils.BaseTestCase):
@@ -147,7 +147,7 @@ class ApiTest(test_utils.BaseTestCase):
 
     def _configure_policy(self):
         policy_file = self._copy_data_file('policy.json', self.test_dir)
-        self.config(policy_file=policy_file)
+        self.config(policy_file=policy_file, group='oslo_policy')
 
     def _configure_logging(self):
         self.config(default_log_levels=[
@@ -76,6 +76,6 @@ class IsolatedUnitTest(StoreClearingUnitTest):
                           registry=self.registry)
 
     def set_policy_rules(self, rules):
-        fap = open(CONF.policy_file, 'w')
+        fap = open(CONF.oslo_policy.policy_file, 'w')
         fap.write(jsonutils.dumps(rules))
         fap.close()
@@ -13,15 +13,16 @@
 # License for the specific language governing permissions and limitations
 # under the License.
 
+from oslo_policy import policy
 # NOTE(jokke): simplified transition to py3, behaves like py2 xrange
 from six.moves import range
 import testtools
 import webob
 
 import glance.api.middleware.cache
+import glance.api.policy
 from glance.common import exception
 from glance import context
-from glance.openstack.common import policy
 import glance.registry.client.v1.api as registry
 from glance.tests.unit import base
 from glance.tests.unit import utils as unit_test_utils
@@ -171,8 +172,7 @@ class ProcessRequestTestCacheFilter(glance.api.middleware.cache.CacheFilter):
 
 class TestCacheMiddlewareProcessRequest(base.IsolatedUnitTest):
     def _enforcer_from_rules(self, unparsed_rules):
-        rules = dict((k, policy.parse_rule(v))
-                     for (k, v) in unparsed_rules.items())
+        rules = policy.Rules.from_dict(unparsed_rules)
         enforcer = glance.api.policy.Enforcer()
         enforcer.set_rules(rules, overwrite=True)
         return enforcer
@@ -64,9 +64,6 @@ class OptsTestCase(utils.BaseTestCase):
             'owner_is_tenant',
             'admin_role',
             'allow_anonymous_access',
-            'policy_file',
-            'policy_default_rule',
-            'policy_dirs',
             'allow_additional_image_properties',
             'image_member_quota',
             'image_property_quota',
@@ -155,9 +152,6 @@ class OptsTestCase(utils.BaseTestCase):
             'owner_is_tenant',
             'admin_role',
             'allow_anonymous_access',
-            'policy_file',
-            'policy_default_rule',
-            'policy_dirs',
             'allow_additional_image_properties',
             'image_member_quota',
             'image_property_quota',
@@ -203,9 +197,6 @@ class OptsTestCase(utils.BaseTestCase):
             None
         ]
         expected_opt_names = [
-            'policy_file',
-            'policy_default_rule',
-            'policy_dirs',
             'allow_additional_image_properties',
             'image_member_quota',
             'image_property_quota',
@@ -255,9 +246,6 @@ class OptsTestCase(utils.BaseTestCase):
             None
         ]
         expected_opt_names = [
-            'policy_file',
-            'policy_default_rule',
-            'policy_dirs',
             'allow_additional_image_properties',
             'image_member_quota',
             'image_property_quota',
@@ -136,7 +136,8 @@ class TestPolicyEnforcer(base.IsolatedUnitTest):
                           enforcer.enforce, context, 'get_image', {})
 
     def test_policy_file_custom_location(self):
-        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'))
+        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'),
+                    group='oslo_policy')
 
         rules = {"get_image": '!'}
         self.set_policy_rules(rules)
@@ -148,7 +149,8 @@ class TestPolicyEnforcer(base.IsolatedUnitTest):
                           enforcer.enforce, context, 'get_image', {})
 
     def test_policy_file_check(self):
-        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'))
+        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'),
+                    group='oslo_policy')
 
         rules = {"get_image": '!'}
         self.set_policy_rules(rules)
@@ -162,7 +164,7 @@ class TestPolicyEnforcer(base.IsolatedUnitTest):
 class TestPolicyEnforcerNoFile(base.IsolatedUnitTest):
     def test_policy_file_specified_but_not_found(self):
         """Missing defined policy file should result in a default ruleset"""
-        self.config(policy_file='gobble.gobble')
+        self.config(policy_file='gobble.gobble', group='oslo_policy')
         enforcer = glance.api.policy.Enforcer()
 
         context = glance.context.RequestContext(roles=[])
@@ -470,7 +472,8 @@ class TestContextPolicyEnforcer(base.IsolatedUnitTest):
                              context_role,
                              context_is_admin,
                              admin_expected):
-        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'))
+        self.config(policy_file=os.path.join(self.test_dir, 'gobble.gobble'),
+                    group='oslo_policy')
 
         rules = {'context_is_admin': 'role:%s' % policy_admin_role}
         self.set_policy_rules(rules)
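The test changes are mechanical: every override of policy_file or policy_default_rule now has to name the oslo_policy group. Outside of Glance's own BaseTestCase.config() helper, the same thing can be done with the stock oslo.config fixture; a hypothetical stand-alone example (the test class is invented, the 'gobble.gobble' value mirrors the tests above):

    from oslo_config import cfg
    from oslo_config import fixture as cfg_fixture
    from oslo_policy import policy
    import testtools


    class PolicyFileOverrideTest(testtools.TestCase):
        def setUp(self):
            super(PolicyFileOverrideTest, self).setUp()
            self.conf = self.useFixture(cfg_fixture.Config(cfg.CONF)).conf
            policy.Enforcer(self.conf)  # ensures the [oslo_policy] options exist

        def test_override(self):
            self.conf.set_override('policy_file', 'gobble.gobble',
                                   group='oslo_policy')
            self.assertEqual('gobble.gobble',
                             self.conf.oslo_policy.policy_file)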
@@ -19,6 +19,7 @@ import mock
 import six
 import webob
 
+import glance.api.policy
 import glance.api.v2.image_data
 from glance.common import exception
 from glance.common import wsgi
@@ -66,7 +66,7 @@ class BaseTestCase(testtools.TestCase):
     def set_policy(self):
         conf_file = "policy.json"
         self.policy_file = self._copy_data_file(conf_file, self.conf_dir)
-        self.config(policy_file=self.policy_file)
+        self.config(policy_file=self.policy_file, group='oslo_policy')
 
     def set_property_protections(self, use_policies=False):
         self.unset_property_protections()
@@ -2,7 +2,6 @@
 
 # The list of modules to copy from oslo-incubator
 module=install_venv_common
-module=policy
 module=service
 
 # The base module to hold the copy of openstack.common
@@ -53,6 +53,7 @@ oslo.db>=1.5.0 # Apache-2.0
 oslo.i18n>=1.3.0 # Apache-2.0
 oslo.log>=0.4.0 # Apache-2.0
 oslo.messaging>=1.6.0 # Apache-2.0
+oslo.policy~=0.3.1 # Apache-2.0
 oslo.serialization>=1.2.0 # Apache-2.0
 
 retrying>=1.2.3,!=1.3.0 # Apache-2.0