Merge "Remove postgresql code"

This commit is contained in:
Zuul 2024-11-07 04:56:39 +00:00 committed by Gerrit Code Review
commit 57b059e36b
34 changed files with 97 additions and 437 deletions

View File

@@ -10,9 +10,7 @@ gettext [test]
libffi-dev [platform:dpkg]
libffi-devel [platform:rpm]
# MySQL and PostgreSQL databases since some jobs are set up in
# OpenStack infra that need these like
# periodic-neutron-py35-with-neutron-lib-master.
# MySQL database, since some jobs set up in OpenStack infra need it.
libmariadb-dev-compat [platform:debian]
libmysqlclient-dev [platform:ubuntu]
mariadb [platform:rpm test]
@@ -20,10 +18,6 @@ mariadb-devel [platform:rpm test]
mariadb-server [platform:rpm platform:redhat platform:debian test]
mysql-client [platform:dpkg !platform:debian test]
mysql-server [platform:dpkg !platform:debian test]
postgresql [test]
postgresql-client [platform:dpkg test]
postgresql-devel [platform:rpm test]
postgresql-server [platform:rpm test]
haproxy
keepalived

View File

@@ -96,22 +96,6 @@ development.
update revision numbers of parent objects. For more details on all of the
things that can go wrong using bulk delete operations, see the "Warning"
sections in the link above.
* For PostgreSQL, if you're using GROUP BY, everything in the SELECT list must
  be either an aggregate (SUM(...), COUNT(...), etc.) or used in the GROUP BY
  clause.

  The incorrect variant:

  .. code:: python

     q = query(Object.id, Object.name,
               func.count(Object.number)).group_by(Object.name)

  The correct variant:

  .. code:: python

     q = query(Object.id, Object.name,
               func.count(Object.number)).group_by(Object.id, Object.name)
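  The same discipline still matters after this commit: MySQL's default
  ONLY_FULL_GROUP_BY SQL mode (5.7+) enforces a similar rule. A
  self-contained sketch of the correct form using SQLAlchemy 2.x
  ``select()`` follows; the ``Widget`` model and the in-memory SQLite
  engine are illustrative, not part of the Neutron tree.

  .. code:: python

     from sqlalchemy import Integer, String, create_engine, func, select
     from sqlalchemy.orm import (DeclarativeBase, Mapped, Session,
                                 mapped_column)

     class Base(DeclarativeBase):
         pass

     class Widget(Base):
         __tablename__ = 'widgets'
         id: Mapped[int] = mapped_column(Integer, primary_key=True)
         name: Mapped[str] = mapped_column(String(32))
         number: Mapped[int] = mapped_column(Integer)

     engine = create_engine('sqlite://')
     Base.metadata.create_all(engine)

     with Session(engine) as session:
         # every non-aggregate SELECT column also appears in GROUP BY
         q = select(Widget.id, Widget.name,
                    func.count(Widget.number)).group_by(Widget.id,
                                                        Widget.name)
         session.execute(q).all()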
* Beware of the `InvalidRequestError <http://docs.sqlalchemy.org/en/latest/faq/sessions.html#this-session-s-transaction-has-been-rolled-back-due-to-a-previous-exception-during-flush-or-similar>`_ exception.
There is even a `Neutron bug <https://bugs.launchpad.net/neutron/+bug/1409774>`_
registered for it. Bear in mind that this error may also occur when nesting
@@ -152,24 +136,6 @@ development.
result = session.execute(mymodel.insert().values(**values))
# result.inserted_primary_key is a list even if we inserted a unique row!
* Beware of pymysql, which can silently unwrap a single-element list (hiding,
  for example, a wrong use of ResultProxy.inserted_primary_key):

  .. code:: python

     e.execute("create table if not exists foo (bar integer)")
     e.execute(foo.insert().values(bar=1))
     e.execute(foo.insert().values(bar=[2]))

  The second insert should crash (list provided, integer expected). It
  crashes at least with the mysql and postgresql backends, but succeeds with
  pymysql because it transforms the statements into:

  .. code:: sql

     INSERT INTO foo (bar) VALUES (1)
     INSERT INTO foo (bar) VALUES ((2))
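  A defensive sketch (not from the Neutron tree) that makes this mistake
  fail loudly on every backend, pymysql included:

  .. code:: python

     from sqlalchemy import Column, Integer, MetaData, Table, create_engine

     engine = create_engine('sqlite://')
     metadata = MetaData()
     foo = Table('foo', metadata, Column('bar', Integer))
     metadata.create_all(engine)

     def assert_scalar_values(values):
         # reject sequence values before they reach the driver
         for key, val in values.items():
             if isinstance(val, (list, tuple, set)):
                 raise TypeError(f'column {key!r} got a sequence: {val!r}')
         return values

     with engine.begin() as conn:
         conn.execute(foo.insert().values(**assert_scalar_values({'bar': 1})))
         # assert_scalar_values({'bar': [2]}) raises TypeError instead of
         # letting the driver silently rewrite the statement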
System development
~~~~~~~~~~~~~~~~~~

View File

@@ -61,7 +61,7 @@ Tests to verify that database migrations and models are in sync
.. automodule:: neutron.tests.functional.db.test_migrations
.. autoclass:: _TestModelsMigrations
.. autoclass:: TestModelsMigrations
:members:

View File

@@ -38,7 +38,7 @@ This test compares models with the result of existing migrations. It is based on
which is provided by oslo.db and was adapted for Neutron. It compares core
Neutron models and vendor specific models with migrations from Neutron core and
migrations from the driver/plugin repo. This test is functional - it runs
against MySQL and PostgreSQL dialects. The detailed description of this test
against the MySQL dialect. The detailed description of this test
can be found in Neutron Database Layer
section - :ref:`testing-database-migrations`.
@@ -107,7 +107,9 @@ with the following content: ::
EXTERNAL_TABLES = set(external.TABLES) - set(external.REPO_FOO_TABLES)
class _TestModelsMigrationsFoo(test_migrations._TestModelsMigrations):
class TestModelsMigrations(testlib_api.MySQLTestCaseMixin,
testlib_api.SqlTestCaseLight,
test_migrations.TestModelsMigrations):
def db_sync(self, engine):
cfg.CONF.set_override('connection', engine.url, group='database')
@@ -128,18 +130,6 @@ with the following content: ::
return True
class TestModelsMigrationsMysql(testlib_api.MySQLTestCaseMixin,
_TestModelsMigrationsFoo,
testlib_api.SqlTestCaseLight):
pass
class TestModelsMigrationsPsql(testlib_api.PostgreSQLTestCaseMixin,
_TestModelsMigrationsFoo,
testlib_api.SqlTestCaseLight):
pass
3. Add functional requirements
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@@ -150,7 +140,6 @@ test execution.
::
psutil>=3.2.2 # BSD
psycopg2
PyMySQL>=0.6.2 # MIT License

View File

@@ -53,7 +53,6 @@ from oslo_utils import uuidutils
from oslo_utils import versionutils
from osprofiler import profiler
from sqlalchemy.dialects.mysql import dialect as mysql_dialect
from sqlalchemy.dialects.postgresql import dialect as postgresql_dialect
from sqlalchemy.dialects.sqlite import dialect as sqlite_dialect
from sqlalchemy.sql.expression import func as sql_func
@@ -1018,9 +1017,8 @@ def get_sql_random_method(sql_dialect_name):
"""Return the SQL random method supported depending on the dialect."""
# NOTE(ralonsoh): this method is a good candidate to be implemented in
# oslo.db.
# https://www.postgresql.org/docs/8.2/functions-math.html
# https://www.sqlite.org/c3ref/randomness.html
if sql_dialect_name in (postgresql_dialect.name, sqlite_dialect.name):
if sql_dialect_name == sqlite_dialect.name:
return sql_func.random
# https://dev.mysql.com/doc/refman/8.0/en/mathematical-functions.html
elif sql_dialect_name == mysql_dialect.name:
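The hunk cuts off before the MySQL branch. For reference, a sketch of the
complete helper after this change, assuming the MySQL branch returns
``sql_func.rand`` (MySQL's RAND(), per the refman link kept in the code):

.. code:: python

   from sqlalchemy.dialects.mysql import dialect as mysql_dialect
   from sqlalchemy.dialects.sqlite import dialect as sqlite_dialect
   from sqlalchemy.sql.expression import func as sql_func

   def get_sql_random_method(sql_dialect_name):
       """Return the SQL random method supported depending on the dialect."""
       # https://www.sqlite.org/c3ref/randomness.html
       if sql_dialect_name == sqlite_dialect.name:
           return sql_func.random
       # https://dev.mysql.com/doc/refman/8.0/en/mathematical-functions.html
       if sql_dialect_name == mysql_dialect.name:
           return sql_func.rand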

View File

@@ -133,9 +133,7 @@ def rename_table_if_exists(old_table_name, new_table_name):
def alter_enum_add_value(table, column, enum, nullable, server_default=None):
'''If we need to expand Enum values for some column - for PostgreSQL this
can be done with the ALTER TYPE function. For MySQL, it can be done with the
ordinary alembic alter_column function.
'''Expand Enum values for a column.
:param table: table name
:param column: column name
@@ -143,86 +141,9 @@ def alter_enum_add_value(table, column, enum, nullable, server_default=None):
:param nullable: existing nullable for column.
:param server_default: existing or new server_default for the column
'''
bind = op.get_bind()
engine = bind.engine
if engine.name == 'postgresql':
values = {'name': enum.name,
'values': ", ".join("'" + i + "'" for i in enum.enums),
'column': column,
'table': table,
'server_default': server_default}
if server_default is not None:
op.execute("ALTER TABLE %(table)s ALTER COLUMN %(column)s"
" DROP DEFAULT" % values)
op.execute("ALTER TYPE %(name)s rename to old_%(name)s" % values)
op.execute("CREATE TYPE %(name)s AS enum (%(values)s)" % values)
op.execute("ALTER TABLE %(table)s ALTER COLUMN %(column)s TYPE "
"%(name)s USING %(column)s::text::%(name)s " % values)
if server_default is not None:
op.execute("ALTER TABLE %(table)s ALTER COLUMN %(column)s"
" SET DEFAULT '%(server_default)s'" % values)
op.execute("DROP TYPE old_%(name)s" % values)
else:
op.alter_column(table, column, type_=enum,
existing_nullable=nullable,
server_default=server_default)
def alter_enum(table, column, enum_type, nullable,
server_default=None, do_drop=True,
do_rename=True, do_create=True):
"""Alter a enum type column.
Set the do_xx parameters only when the modified enum type
is used by multiple columns. Else don't provide these
parameters.
:param server_default: existing or new server_default for the column
:param do_drop: set to False when the modified column is
not the last one using this enum
:param do_rename: set to False when the modified column is
not the first one using this enum
:param do_create: set to False when the modified column is
not the first one using this enum
"""
bind = op.get_bind()
engine = bind.engine
if engine.name == 'postgresql':
values = {'table': table,
'column': column,
'name': enum_type.name}
if do_rename:
op.execute("ALTER TYPE %(name)s RENAME TO old_%(name)s" % values)
if do_create:
enum_type.create(bind, checkfirst=False)
op.execute("ALTER TABLE %(table)s RENAME COLUMN %(column)s TO "
"old_%(column)s" % values)
op.add_column(table, sa.Column(column, enum_type, nullable=nullable,
server_default=server_default))
op.execute("UPDATE %(table)s SET %(column)s = " # nosec
"old_%(column)s::text::%(name)s" % values)
op.execute("ALTER TABLE %(table)s DROP COLUMN old_%(column)s" % values)
if do_drop:
op.execute("DROP TYPE old_%(name)s" % values)
else:
op.alter_column(table, column, type_=enum_type,
existing_nullable=nullable,
server_default=server_default)
def create_table_if_not_exist_psql(table_name, values):
if op.get_bind().engine.dialect.server_version_info < (9, 1, 0):
op.execute("CREATE LANGUAGE plpgsql")
op.execute("CREATE OR REPLACE FUNCTION execute(TEXT) RETURNS VOID AS $$"
"BEGIN EXECUTE $1; END;"
"$$ LANGUAGE plpgsql STRICT;")
op.execute("CREATE OR REPLACE FUNCTION table_exist(TEXT) RETURNS bool as "
"$$ SELECT exists(select 1 from pg_class where relname=$1);"
"$$ language sql STRICT;")
op.execute("SELECT execute($$CREATE TABLE %(name)s %(columns)s $$) "
"WHERE NOT table_exist(%(name)r);" %
{'name': table_name,
'columns': values})
op.alter_column(table, column, type_=enum,
existing_nullable=nullable,
server_default=server_default)
def get_unique_constraints_map(table):
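With the PostgreSQL branch removed, the helper reduces to a plain alembic
``op.alter_column`` call. A hypothetical migration snippet using it; the
table, enum and default below are illustrative (loosely borrowed from the
qos migration shown later in this diff):

.. code:: python

   import sqlalchemy as sa

   direction_enum = sa.Enum('egress', 'ingress', name='directions')

   def upgrade():
       # now just an ordinary alembic alter_column underneath
       alter_enum_add_value('qos_bandwidth_limit_rules', 'direction',
                            direction_enum, nullable=False,
                            server_default='egress')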

View File

@@ -41,9 +41,6 @@ direction_enum = sa.Enum(
def upgrade():
if op.get_context().bind.dialect.name == 'postgresql':
direction_enum.create(op.get_bind(), checkfirst=True)
with migration.remove_fks_from_table(bw_limit_table_name,
remove_unique_constraints=True):
op.add_column(bw_limit_table_name,
@@ -62,21 +59,12 @@ def expand_drop_exceptions():
Drop the existing QoS policy foreign key uniq constraint and then replace
it with a new unique constraint for the pair (policy_id, direction).
Because constraint names differ between MySQL and PGSQL, both variants
need to be added to the drop exceptions.
"""
# TODO(slaweq): replace hardcoded constraint names with names obtained
# directly from the database model once bug
# https://bugs.launchpad.net/neutron/+bug/1685352 is closed
return {
sa.ForeignKeyConstraint: [
"qos_bandwidth_limit_rules_ibfk_1", # MySQL name
"qos_bandwidth_limit_rules_qos_policy_id_fkey" # PGSQL name
],
sa.UniqueConstraint: [
"qos_policy_id", # MySQL name
"qos_bandwidth_limit_rules_qos_policy_id_key" # PGSQL name
]
sa.ForeignKeyConstraint: ["qos_bandwidth_limit_rules_ibfk_1"],
sa.UniqueConstraint: ["qos_policy_id"]
}
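The consumer of this map is not shown in the diff; a rough illustrative
helper (not Neutron's actual API) conveys how such an exception map can be
checked:

.. code:: python

   def is_allowed_drop(constraint, drop_exceptions):
       # a drop is tolerated only if the constraint's type and name are
       # listed in the exceptions map returned above
       allowed = drop_exceptions.get(type(constraint), [])
       return getattr(constraint, 'name', None) in allowed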

View File

@@ -36,8 +36,6 @@ down_revision = 'd8bdf05313f4'
# https://dev.mysql.com/doc/relnotes/mysql/8.0/en/news-8-0-16.html
# - MariaDB: since version 10.2.1 (July 2016)
# https://mariadb.com/kb/en/mariadb-1021-release-notes/
# - PostgreSQL: since version 9.4 (December 2014)
# https://www.postgresql.org/docs/9.4/ddl-constraints.html
#
# If the DB engine does not support yet this feature, it will be ignored. The
# VLAN tag constraint is enforced in the Neutron API. This extra enforcement
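For reference, a check constraint of the kind the comment describes can be
declared in alembic as sketched below; the constraint, table and column
names here are hypothetical:

.. code:: python

   import sqlalchemy as sa
   from alembic import op

   def upgrade():
       # ignored by engines without CHECK support; see the comment above
       op.create_check_constraint(
           'check_vlan_tag_range',        # hypothetical constraint name
           'networksegments',             # hypothetical table
           sa.column('segmentation_id').between(1, 4094))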

View File

@@ -34,8 +34,7 @@ TABLES = ['networkrbacs', 'qospolicyrbacs', 'securitygrouprbacs',
'addressscoperbacs', 'subnetpoolrbacs', 'addressgrouprbacs']
DROPPED_UNIQUE_CONSTRAINTS = [
'uniq_networkrbacs0tenant_target0object_id0action',
'qospolicyrbacs_target_tenant_object_id_action_key', # PSQL
'target_tenant', # MySQL, name provided by mistake
'target_tenant', # name provided by mistake
'uniq_securitygrouprbacs0target_tenant0object_id0action',
'uniq_address_scopes_rbacs0target_tenant0object_id0action',
'uniq_subnetpools_rbacs0target_tenant0object_id0action',

View File

@@ -55,8 +55,5 @@ def expand_drop_exceptions():
it first needs to be dropped, modified and re-added.
"""
return {
sa.ForeignKeyConstraint: [
'securitygroupportbindings_ibfk_2', # MySQL name
'securitygroupportbindings_security_group_id_fkey', # PGSQL name
],
sa.ForeignKeyConstraint: ['securitygroupportbindings_ibfk_2']
}

View File

@@ -706,10 +706,10 @@ class SecurityGroupDbMixin(
def _get_ip_proto_number(self, protocol):
if protocol in const.SG_RULE_PROTO_ANY:
return
# According to bug 1381379, protocol is always set to string to avoid
# problems with comparing int and string in PostgreSQL. Here this
# string is converted to int to give an opportunity to use it as
# before.
# According to bug 1381379, protocol is always set to string. This was
# done to avoid problems with comparing int and string in PostgreSQL.
# (Since then, the backend is no longer supported.) Here this string is
# converted to int to give an opportunity to use it as before.
if protocol in constants.IP_PROTOCOL_NAME_ALIASES:
protocol = constants.IP_PROTOCOL_NAME_ALIASES[protocol]
return int(constants.IP_PROTOCOL_MAP.get(protocol, protocol))
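A standalone sketch of the conversion this method performs, with tiny
illustrative stand-ins for the neutron_lib tables:

.. code:: python

   IP_PROTOCOL_MAP = {'tcp': 6, 'udp': 17, 'icmp': 1}
   IP_PROTOCOL_NAME_ALIASES = {'icmpv4': 'icmp'}

   def ip_proto_number(protocol):
       if protocol is None:  # stands in for const.SG_RULE_PROTO_ANY
           return None
       protocol = IP_PROTOCOL_NAME_ALIASES.get(protocol, protocol)
       return int(IP_PROTOCOL_MAP.get(protocol, protocol))

   assert ip_proto_number('tcp') == 6
   assert ip_proto_number('6') == 6  # string form stored since bug 1381379
   assert ip_proto_number(None) is None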

View File

@@ -14,6 +14,7 @@
# under the License.
import abc
import typing
import netaddr
from neutron_lib.api import converters
@@ -148,15 +149,12 @@ class SecurityGroupRuleInvalidEtherType(exceptions.InvalidInput):
"supported. Allowed values are %(values)s.")
def convert_protocol(value):
def convert_protocol(value) -> typing.Optional[str]:
if value in _constants.SG_RULE_PROTO_ANY:
return
return None
try:
val = int(value)
if 0 <= val <= 255:
# Set value of protocol number to string due to bug 1381379,
# PostgreSQL fails when it tries to compare integer with string,
# that exists in db.
return str(value)
raise SecurityGroupRuleInvalidProtocol(
protocol=value, values=sg_supported_protocols)
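The hunk truncates mid-function. A self-contained sketch of the whole
validator after this change, using stand-in constants (the real ones live
in neutron_lib) and an assumed name-validation fallback in the ``except``
branch:

.. code:: python

   import typing

   SG_RULE_PROTO_ANY = (None, 'any')                # stand-in constants
   sg_supported_protocols = ('tcp', 'udp', 'icmp')  # illustrative subset

   class SecurityGroupRuleInvalidProtocol(Exception):
       def __init__(self, protocol, values):
           super().__init__(f'{protocol!r} not in {values}')

   def convert_protocol(value) -> typing.Optional[str]:
       if value in SG_RULE_PROTO_ANY:
           return None
       try:
           if 0 <= int(value) <= 255:
               # kept as a string, a relic of bug 1381379
               return str(value)
           raise SecurityGroupRuleInvalidProtocol(
               protocol=value, values=sg_supported_protocols)
       except (ValueError, TypeError):
           # presumed fallback: validate well-known protocol names
           if str(value).lower() in sg_supported_protocols:
               return str(value).lower()
           raise SecurityGroupRuleInvalidProtocol(
               protocol=value, values=sg_supported_protocols)

   assert convert_protocol('tcp') == 'tcp'
   assert convert_protocol(6) == '6'
   assert convert_protocol(None) is None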

View File

@@ -57,12 +57,11 @@ class SubPort(base.NeutronDbObject):
super().create()
except o_db_exc.DBReferenceError as ex:
if ex.key_table is None:
# NOTE(ivc): 'key_table' is provided by 'oslo.db' [1]
# only for a limited set of database backends (i.e.
# MySQL and PostgreSQL). Other database backends
# (including SQLite) would have 'key_table' set to None.
# We emulate the 'key_table' support for such database
# backends.
# NOTE(ivc): 'key_table' is provided by 'oslo.db' [1] only for
# a limited set of database backends (i.e. MySQL). Other
# database backends (including SQLite) would have 'key_table'
# set to None. We emulate the 'key_table' support for such
# database backends.
#
# [1] https://github.com/openstack/oslo.db/blob/3fadd5a
# /oslo_db/sqlalchemy/exc_filters.py#L190-L203
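A self-contained toy version (all names illustrative) of the emulation the
note describes: when the backend reports no ``key_table``, probe the
candidate parent tables and fill it in before re-raising:

.. code:: python

   class DBReferenceError(Exception):
       def __init__(self, key_table=None):
           self.key_table = key_table

   def create_subport(port_exists, trunk_exists):
       try:
           raise DBReferenceError()  # pretend the FK insert failed
       except DBReferenceError as ex:
           if ex.key_table is None:
               # emulate what oslo.db fills in on MySQL
               for table, exists in (('ports', port_exists),
                                     ('trunks', trunk_exists)):
                   if not exists:
                       ex.key_table = table
                       break
           raise

   try:
       create_subport(port_exists=False, trunk_exists=True)
   except DBReferenceError as ex:
       assert ex.key_table == 'ports'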

View File

@@ -50,18 +50,9 @@ class TagPlugin(tagging.TagPluginBase):
def _extend_tags_dict(response_data, db_data):
if not directory.get_plugin(tagging.TAG_PLUGIN_TYPE):
return
try:
tags = [tag_db.tag for tag_db in db_data.standard_attr.tags]
except AttributeError:
# NOTE(ralonsoh): this method can be called from a "list"
# operation. If one resource and its "standardattr" register is
# deleted concurrently, the "standard_attr" field retrieval will
# fail.
# The "list" operation is protected with a READER transaction
# context; however this is failing with the DB PostgreSQL backend.
# https://bugs.launchpad.net/neutron/+bug/2078787
tags = []
response_data['tags'] = tags
response_data['tags'] = [
tag_db.tag for tag_db in db_data.standard_attr.tags
]
@db_api.CONTEXT_READER
def _get_resource(self, context, resource, resource_id):

View File

@@ -29,8 +29,8 @@ class TestWaitUntilTrue(base.BaseLoggingTestCase):
utils.wait_until_true(lambda: False, 2)
class _TestIsSessionActive(testlib_api.SqlTestCase):
class TestIsSessionActive(testlib_api.SqlTestCase,
testlib_api.MySQLTestCaseMixin):
DRIVER = None
def setUp(self):
@@ -47,13 +47,3 @@ class _TestIsSessionActive(testlib_api.SqlTestCase):
self.assertTrue(db_api.is_session_active(context.session))
self.assertFalse(db_api.is_session_active(context.session))
class TestIsSessionActivePostgreSQL(testlib_api.PostgreSQLTestCaseMixin,
_TestIsSessionActive):
pass
class TestIsSessionActiveMySQL(testlib_api.MySQLTestCaseMixin,
_TestIsSessionActive):
pass

View File

@@ -20,7 +20,7 @@ from neutron.db.migration.alembic_migrations.versions.yoga.expand import \
from neutron.tests.functional.db import test_migrations
class TestAddIndexesToRbacsMixin:
class TestAddIndexesToRbacs(test_migrations.TestWalkMigrations):
"""Validates binding_index for NetworkDhcpAgentBinding migration."""
@staticmethod
@@ -41,15 +41,3 @@ class TestAddIndexesToRbacsMixin:
table_indexes = db_utils.get_indexes(engine, table + 'rbacs')
for column in _migration.COLUMNS:
self.assertTrue(self.get_index(table_indexes, column))
class TestAddIndexesToRbacsMySQL(
TestAddIndexesToRbacsMixin,
test_migrations.TestWalkMigrationsMySQL):
pass
class TestAddIndexesToRbacsPostgreSQL(
TestAddIndexesToRbacsMixin,
test_migrations.TestWalkMigrationsPostgreSQL):
pass

View File

@@ -21,7 +21,7 @@ from oslo_utils import uuidutils
from neutron.tests.functional.db import test_migrations
class NetworkDhcpAgentBindingMigrationMixin:
class TestNetworkDhcpAgentBindingMigration(test_migrations.TestWalkMigrations):
"""Validates binding_index for NetworkDhcpAgentBinding migration."""
def _create_so(self, o_type, values):
@@ -79,15 +79,3 @@ class NetworkDhcpAgentBindingMigrationMixin:
for binding_indices in networks_to_bindings.values():
self.assertEqual(list(range(1, 3)), sorted(binding_indices))
class TestNetworkDhcpAgentBindingMigrationMySQL(
NetworkDhcpAgentBindingMigrationMixin,
test_migrations.TestWalkMigrationsMySQL):
pass
class TestNetworkDhcpAgentBindingMigrationPostgreSQL(
NetworkDhcpAgentBindingMigrationMixin,
test_migrations.TestWalkMigrationsPostgreSQL):
pass

View File

@@ -35,7 +35,7 @@ load_tests = testlib_api.module_load_tests
# load_tests = test_base.optimize_db_test_loader(__file__)
class IpamTestCase(testlib_api.SqlTestCase):
class IpamTestCase(testlib_api.SqlTestCase, testlib_api.MySQLTestCaseMixin):
"""Base class for tests that aim to test ip allocation."""
def setUp(self):
super().setUp()
@@ -138,11 +138,3 @@ class IpamTestCase(testlib_api.SqlTestCase):
self.assert_ip_alloc_pool_matches(ip_alloc_pool_expected)
with testtools.ExpectedException(n_exc.IpAddressGenerationFailure):
self._create_port(self.port_id)
class TestIpamMySQL(testlib_api.MySQLTestCaseMixin, IpamTestCase):
pass
class TestIpamPostgreSQL(testlib_api.PostgreSQLTestCaseMixin, IpamTestCase):
pass

View File

@@ -81,7 +81,10 @@ def upgrade(engine, alembic_config, branch_name='heads'):
branch_name)
class _TestModelsMigrations(test_migrations.ModelsMigrationsSync):
class TestModelsMigrations(test_migrations.ModelsMigrationsSync,
testlib_api.MySQLTestCaseMixin,
testlib_api.SqlTestCaseLight,
functional_base.BaseLoggingTestCase):
'''Test for checking equality of models state and migrations.
For the opportunistic testing you need to set up a db named
@@ -89,14 +92,6 @@ class _TestModelsMigrations(test_migrations.ModelsMigrationsSync):
'openstack_citest' on localhost.
The test will then use that db and user/password combo to run the tests.
For PostgreSQL on Ubuntu this can be done with the following commands::
sudo -u postgres psql
postgres=# create user openstack_citest with createdb login password
'openstack_citest';
postgres=# create database openstack_citest with owner
openstack_citest;
For MySQL on Ubuntu this can be done with the following commands::
mysql -u root
@@ -210,13 +205,13 @@ class _TestModelsMigrations(test_migrations.ModelsMigrationsSync):
def test_upgrade_expand_branch(self):
# Verify that "command neutron-db-manage upgrade --expand" works
# without errors. Check this for both MySQL and PostgreSQL.
# without errors.
upgrade(self.engine, self.alembic_config,
branch_name='%s@head' % migration.EXPAND_BRANCH)
def test_upgrade_contract_branch(self):
# Verify that "command neutron-db-manage upgrade --contract" works
# without errors. Check this for both MySQL and PostgreSQL.
# without errors.
upgrade(self.engine, self.alembic_config,
branch_name='%s@head' % migration.CONTRACT_BRANCH)
@@ -353,12 +348,6 @@ class _TestModelsMigrations(test_migrations.ModelsMigrationsSync):
self.alembic_config, 'unused'),
msg='Offline contract migration scripts are forbidden for Ocata+')
class TestModelsMigrationsMySQL(testlib_api.MySQLTestCaseMixin,
_TestModelsMigrations,
testlib_api.SqlTestCaseLight,
functional_base.BaseLoggingTestCase):
def test_check_mysql_engine(self):
engine = self.get_engine()
url_str = render_url_str(engine.url)
@@ -382,12 +371,6 @@ class TestModelsMigrationsMySQL(testlib_api.MySQLTestCaseMixin,
super().test_models_sync()
class TestModelsMigrationsPostgreSQL(testlib_api.PostgreSQLTestCaseMixin,
_TestModelsMigrations,
testlib_api.SqlTestCaseLight):
pass
class TestSanityCheck(testlib_api.SqlTestCaseLight):
BUILD_SCHEMA = False
@@ -525,7 +508,8 @@ class TestWalkDowngrade(oslotest_base.BaseTestCase):
return True
class _TestWalkMigrations:
class TestWalkMigrations(testlib_api.MySQLTestCaseMixin,
testlib_api.SqlTestCaseLight):
'''This adds a framework for testing schema migrations
against different backends.
@@ -575,6 +559,13 @@ class _TestWalkMigrations:
migration.do_alembic_command(config, 'upgrade', dest)
check(engine, data)
# NOTE(slaweq): this workaround is taken from Manila patch:
# https://review.opendev.org/#/c/291397/
# Set a 5 minute timeout in case this runs on very slow nodes/VMs. Note
# that this test becomes slower with each addition of a new DB migration.
# On fast nodes it can take about 5-10 secs with the Mitaka set of
# migrations.
@test_base.set_timeout(600)
def test_walk_versions(self):
"""Test migrations ability to upgrade and downgrade.
@@ -590,26 +581,3 @@ class _TestWalkMigrations:
if upgrade_dest:
migration.do_alembic_command(config, 'upgrade', upgrade_dest)
class TestWalkMigrationsMySQL(testlib_api.MySQLTestCaseMixin,
_TestWalkMigrations,
testlib_api.SqlTestCaseLight):
# NOTE(slaweq): this workaround is taken from Manila patch:
# https://review.opendev.org/#/c/291397/
# Set 5 minutes timeout for case of running it on
# very slow nodes/VMs. Note, that this test becomes slower with each
# addition of new DB migration. On fast nodes it can take about 5-10
# secs having Mitaka set of migrations. 'pymysql' works much slower
# on slow nodes than 'psycopg2' and because of that this increased
# timeout is required only when for testing with 'mysql' backend.
@test_base.set_timeout(600)
def test_walk_versions(self):
super().test_walk_versions()
class TestWalkMigrationsPostgreSQL(testlib_api.PostgreSQLTestCaseMixin,
_TestWalkMigrations,
testlib_api.SqlTestCaseLight):
pass

View File

@@ -20,7 +20,8 @@ from neutron_lib import context
from neutron.tests.unit import testlib_api
class _SegmentAllocation(testlib_api.SqlTestCase):
class SegmentAllocation(testlib_api.SqlTestCase,
testlib_api.MySQLTestCaseMixin):
PHYSNETS = ('phys1', 'phys2')
NUM_SEGIDS = 10
@@ -78,13 +79,3 @@ class _SegmentAllocation(testlib_api.SqlTestCase):
self.assertEqual(0, len(self.segments))
self.assertIsNone(m_get(self.context))
class _SegmentAllocationMySQL(_SegmentAllocation,
testlib_api.MySQLTestCaseMixin):
pass
class _SegmentAllocationPostgreSQL(_SegmentAllocation,
testlib_api.PostgreSQLTestCaseMixin):
pass

View File

@@ -17,10 +17,5 @@ from neutron.objects.plugins.ml2 import geneveallocation
from neutron.tests.functional.objects.plugins.ml2 import test_base
class TestGeneveSegmentAllocationMySQL(test_base._SegmentAllocationMySQL):
segment_allocation_class = geneveallocation.GeneveAllocation
class TestGeneveSegmentAllocationPostgreSQL(
test_base._SegmentAllocationPostgreSQL):
class TestGeneveSegmentAllocation(test_base.SegmentAllocation):
segment_allocation_class = geneveallocation.GeneveAllocation

View File

@@ -17,10 +17,5 @@ from neutron.objects.plugins.ml2 import greallocation
from neutron.tests.functional.objects.plugins.ml2 import test_base
class TestGreSegmentAllocationMySQL(test_base._SegmentAllocationMySQL):
segment_allocation_class = greallocation.GreAllocation
class TestGreSegmentAllocationPostgreSQL(
test_base._SegmentAllocationPostgreSQL):
class TestGreSegmentAllocation(test_base.SegmentAllocation):
segment_allocation_class = greallocation.GreAllocation

View File

@@ -17,10 +17,5 @@ from neutron.objects.plugins.ml2 import vlanallocation
from neutron.tests.functional.objects.plugins.ml2 import test_base
class TestVlanSegmentAllocationMySQL(test_base._SegmentAllocationMySQL):
segment_allocation_class = vlanallocation.VlanAllocation
class TestVlanSegmentAllocationPostgreSQL(
test_base._SegmentAllocationPostgreSQL):
class TestVlanSegmentAllocation(test_base.SegmentAllocation):
segment_allocation_class = vlanallocation.VlanAllocation

View File

@@ -17,10 +17,5 @@ from neutron.objects.plugins.ml2 import vxlanallocation
from neutron.tests.functional.objects.plugins.ml2 import test_base
class TestVxlanSegmentAllocationMySQL(test_base._SegmentAllocationMySQL):
segment_allocation_class = vxlanallocation.VxlanAllocation
class TestVxlanSegmentAllocationPostgreSQL(
test_base._SegmentAllocationPostgreSQL):
class TestVxlanSegmentAllocation(test_base.SegmentAllocation):
segment_allocation_class = vxlanallocation.VxlanAllocation

View File

@@ -24,8 +24,8 @@ from neutron.objects import quota
from neutron.tests.unit import testlib_api
class _ReservationSql(testlib_api.SqlTestCase):
class TestReservationSql(testlib_api.SqlTestCase,
testlib_api.MySQLTestCaseMixin):
def setUp(self):
super().setUp()
self.context = context.Context(user_id=None, tenant_id=None,
@@ -63,13 +63,3 @@ class _ReservationSql(testlib_api.SqlTestCase):
res.project_id, resources, True)
self.assertEqual({'port': 100}, res_map)
self.assertIsInstance(res_map['port'], int)
class TestReservationMySQL(testlib_api.MySQLTestCaseMixin,
_ReservationSql):
pass
class TestReservationPostgreSQL(testlib_api.PostgreSQLTestCaseMixin,
_ReservationSql):
pass

View File

@@ -4,5 +4,4 @@
# of appearance. Changing the order has an impact on the overall integration
# process, which may cause wedges in the gate later.
psycopg2
PyMySQL>=0.6.2 # MIT License

View File

@@ -260,15 +260,6 @@ class MySQLTestCaseMixin(OpportunisticDBTestMixin):
DRIVER = "mysql"
class PostgreSQLTestCaseMixin(OpportunisticDBTestMixin):
"""Mixin that turns any BaseSqlTestCase into a PostgresSQL test suite.
If the PostgreSQL db is unavailable then this test is skipped, unless
OS_FAIL_ON_MISSING_DEPS is enabled.
"""
DRIVER = "postgresql"
def module_load_tests(loader, found_tests, pattern):
"""Apply OptimisingTestSuite on a per-module basis.

View File

@@ -0,0 +1,5 @@
---
upgrade:
- |
PostgreSQL is no longer supported. You are advised to migrate to a
supported database backend (one of the MySQL derivatives).

View File

@@ -22,7 +22,7 @@
Q_BUILD_OVS_FROM_GIT={{ Q_BUILD_OVS_FROM_GIT }}
MEMORY_TRACKER={{ MEMORY_TRACKER }}
INSTALL_OVN={{ INSTALL_OVN }}
# This is DB USER used in e.g. pgsql db
# This is the DB user used in the mysql db
DATABASE_USER=openstack_citest
MYSQL_GATHER_PERFORMANCE={{ MYSQL_GATHER_PERFORMANCE | default(true) }}

View File

@@ -15,8 +15,6 @@
/*
Fix wrongly parented physical NIC resource providers due to bug
https://bugs.launchpad.net/neutron/+bug/1921150
Compatible with MySQL.
*/
USE placement;

View File

@@ -152,10 +152,8 @@ function _install_rpc_backend {
}
# _install_databases [install_pg]
function _install_databases {
local install_pg=${1:-True}
# _install_database
function _install_database {
echo_summary "Installing databases"
# Avoid attempting to configure the db if it appears to already
@@ -175,13 +173,6 @@ function _install_databases {
install_database
configure_database_mysql
if [[ "$install_pg" == "True" ]]; then
enable_service postgresql
initialize_database_backends
install_database
configure_database_postgresql
fi
# Set up the '${DATABASE_USER}' user and '${DATABASE_NAME}' database in each backend
tmp_dir=$(mktemp -d)
trap "rm -rf $tmp_dir" EXIT
@@ -193,17 +184,6 @@ GRANT ALL PRIVILEGES ON *.* TO '${DATABASE_USER}'@'localhost';
FLUSH PRIVILEGES;
EOF
/usr/bin/mysql -u $MYSQL_USER -p"$MYSQL_PASSWORD" < $tmp_dir/mysql.sql
if [[ "$install_pg" == "True" ]]; then
cat << EOF > $tmp_dir/postgresql.sql
CREATE USER ${DATABASE_USER} WITH CREATEDB LOGIN PASSWORD '${DATABASE_PASSWORD}';
CREATE DATABASE ${DATABASE_NAME} WITH OWNER ${DATABASE_USER};
EOF
# User/group postgres needs to be given access to tmp_dir
setfacl -m g:postgres:rwx $tmp_dir
sudo -u root sudo -u postgres /usr/bin/psql --file=$tmp_dir/postgresql.sql
fi
}
@@ -263,7 +243,7 @@ EOF
function _install_post_devstack {
echo_summary "Performing post-devstack installation"
_install_databases
_install_database
_install_rootwrap_sudoers
if is_ubuntu; then
@@ -342,7 +322,7 @@ _init
if [[ "$IS_GATE" != "True" ]]; then
if [[ "$INSTALL_MYSQL_ONLY" == "True" ]]; then
_install_databases nopg
_install_database
else
configure_host_for_func_testing
fi

View File

@@ -88,8 +88,8 @@ setenv = {[testenv:dsvm-functional]setenv}
deps = {[testenv:dsvm-functional]deps}
commands =
bash {toxinidir}/tools/deploy_rootwrap.sh {toxinidir} {envdir}/etc {envdir}/bin
stestr run --slowest --exclude-regex (.*MySQL\.|.*PostgreSQL\.|.*test_get_all_devices|.*TestMetadataAgent\.|.*BaseOVSTestCase\.|.*test_periodic_sync_routers_task) {posargs}
stestr run --slowest --combine --concurrency 1 (.*MySQL\.|.*PostgreSQL\.|.*test_get_all_devices|.*TestMetadataAgent\.|.*BaseOVSTestCase\.|.*test_periodic_sync_routers_task) {posargs}
stestr run --slowest --exclude-regex (.*MySQL\.|.*test_get_all_devices|.*TestMetadataAgent\.|.*BaseOVSTestCase\.|.*test_periodic_sync_routers_task) {posargs}
stestr run --slowest --combine --concurrency 1 (.*MySQL\.|.*test_get_all_devices|.*TestMetadataAgent\.|.*BaseOVSTestCase\.|.*test_periodic_sync_routers_task) {posargs}
[testenv:dsvm-fullstack]
description =

View File

@@ -104,7 +104,6 @@
- neutron-ovs-tempest-slow
- neutron-ovn-tempest-slow
- neutron-ovs-tempest-with-os-ken-master
- neutron-ovn-tempest-postgres-full
- neutron-ovn-tempest-mariadb-full
- neutron-ovn-tempest-ipv6-only-ovs-master
- neutron-ovn-tempest-ovs-master-centos-9-stream

View File

@@ -223,69 +223,6 @@
name: neutron-tempest-iptables_hybrid
parent: neutron-ovs-tempest-iptables_hybrid
- job:
name: neutron-ovn-tempest-postgres-full
parent: tempest-integrated-networking
timeout: 10800
required-projects:
- openstack/neutron
- openstack/tempest
vars:
devstack_plugins:
neutron: https://opendev.org/openstack/neutron.git
devstack_localrc:
CIRROS_VERSION: 0.6.2
DEFAULT_IMAGE_NAME: cirros-0.6.2-x86_64-uec
DEFAULT_IMAGE_FILE_NAME: cirros-0.6.2-x86_64-uec.tar.gz
NEUTRON_DEPLOY_MOD_WSGI: true
devstack_services:
postgresql: true
mysql: false
br-ex-tcpdump: true
br-int-flows: true
# Cinder services
c-api: false
c-bak: false
c-sch: false
c-vol: false
cinder: false
# Swift services
s-account: false
s-container: false
s-object: false
s-proxy: false
zuul_copy_output:
'/var/log/ovn': 'logs'
'/var/log/openvswitch': 'logs'
'/var/lib/ovn': 'logs'
irrelevant-files: &tempest-db-irrelevant-files
- ^\.pre-commit-config\.yaml$
- ^\.pylintrc$
- ^test-requirements.txt$
- ^.*\.conf\.sample$
- ^.*\.rst$
- ^doc/.*$
- ^neutron/locale/.*$
- ^neutron/tests/.*$
- ^releasenotes/.*$
- ^setup.cfg$
- ^tools/.*$
- ^tox.ini$
- ^plugin.spec$
- ^tools/ovn_migration/.*$
- ^vagrant/.*$
- ^neutron/agent/.*$
- ^neutron/plugins/ml2/drivers/.*$
- ^roles/.*functional.*$
- ^playbooks/.*functional.*$
# Ignore everything except for zuul.d/project.yaml
- ^zuul.d/base.yaml
- ^zuul.d/grenade.yaml
- ^zuul.d/job-templates.yaml
- ^zuul.d/rally.yaml
- ^zuul.d/tempest-multinode.yaml
- ^zuul.d/tempest-singlenode.yaml
- job:
name: neutron-ovn-tempest-mariadb-full
parent: tempest-integrated-networking
@@ -324,7 +261,33 @@
# NOTE(ralonsoh): once MariaDB default version in Ubuntu is bumped to
# >10.1, this workaround can be removed (bug 1855912)
pre-run: playbooks/add_mariadb_repo.yaml
irrelevant-files: *tempest-db-irrelevant-files
irrelevant-files: &tempest-db-irrelevant-files
- ^\.pre-commit-config\.yaml$
- ^\.pylintrc$
- ^test-requirements.txt$
- ^.*\.conf\.sample$
- ^.*\.rst$
- ^doc/.*$
- ^neutron/locale/.*$
- ^neutron/tests/.*$
- ^releasenotes/.*$
- ^setup.cfg$
- ^tools/.*$
- ^tox.ini$
- ^plugin.spec$
- ^tools/ovn_migration/.*$
- ^vagrant/.*$
- ^neutron/agent/.*$
- ^neutron/plugins/ml2/drivers/.*$
- ^roles/.*functional.*$
- ^playbooks/.*functional.*$
# Ignore everything except for zuul.d/project.yaml
- ^zuul.d/base.yaml
- ^zuul.d/grenade.yaml
- ^zuul.d/job-templates.yaml
- ^zuul.d/rally.yaml
- ^zuul.d/tempest-multinode.yaml
- ^zuul.d/tempest-singlenode.yaml
- job:
name: neutron-ovs-tempest-with-os-ken-master