a80c720af5
For this commit, ssync is just a direct replacement for how we use rsync. Assuming we switch over to ssync completely someday and drop rsync, we will then be able to improve the algorithms even further (removing local objects as we successfully transfer each one rather than waiting for whole partitions, using an index.db with hash-trees, etc.)

For easier review, this commit can be thought of in distinct parts:

1) New global_conf_callback functionality for allowing services to perform setup code before workers, etc. are launched. (This is then used by ssync in the object server to create a cross-worker semaphore to restrict concurrent incoming replication.)

2) A bit of shifting of items up from object server and replicator to diskfile or DEFAULT conf sections for better sharing of the same settings: conn_timeout, node_timeout, client_timeout, network_chunk_size, disk_chunk_size.

3) Modifications to the object server and replicator to optionally use ssync in place of rsync. This is done in a generic enough way that switching to FutureSync should be easy someday.

4) The biggest part, and (at least for now) completely optional part, is the new ssync_sender and ssync_receiver files. Nice and isolated for easier testing and visibility into test coverage, etc.

All the usual logging, statsd, recon, etc. instrumentation is still there when using ssync, just as it is when using rsync. Beyond the essential error and exceptional condition logging, I have not added any additional instrumentation at this time. Unless there is something someone finds super pressing to have added to the logging, I think such additions would be better as separate change reviews.

FOR NOW, IT IS NOT RECOMMENDED TO USE SSYNC ON PRODUCTION CLUSTERS. Some of us will be using it in a limited fashion to look for any subtle issues, tuning, etc., but generally ssync is an experimental feature. In its current implementation it is probably going to be a bit slower than rsync, but if all goes according to plan it will end up much faster.

There are no comparisons yet between ssync and rsync other than some raw virtual machine testing I've done to show it should compete well enough once we can put it in use in the real world. If you Tweet, Google+, or whatever, be sure to indicate it's experimental. It'd be best to keep it out of deployment guides, howtos, etc. until we all figure out if we like it, find it to be stable, etc.

Change-Id: If003dcc6f4109e2d2a42f4873a0779110fff16d6
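As background for the tests in this file: every ssync protocol line travels as one chunk of a chunked-transfer HTTP body. A minimal sketch of that framing, where frame_line is an illustrative helper and not anything exported by ssync_sender:

def frame_line(line):
    # One protocol line -> one HTTP chunk: hex payload length, CRLF,
    # payload, CRLF. The payload itself already ends in \r\n.
    return '%x\r\n%s\r\n' % (len(line), line)

# 0x17 == 23 bytes, matching the '17\r\n...' strings asserted in the
# tests below.
assert frame_line(':MISSING_CHECK: START\r\n') == \
    '17\r\n:MISSING_CHECK: START\r\n\r\n'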
801 lines
31 KiB
Python
# Copyright (c) 2013 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import StringIO
import unittest

import eventlet
import mock

from swift.common import exceptions
from swift.obj import ssync_sender

class FakeReplicator(object):
    # Stand-in for the replicator daemon: exposes just the logger and the
    # timeout/chunk-size settings that Sender reads.

    def __init__(self):
        self.logger = mock.MagicMock()
        self.conn_timeout = 1
        self.node_timeout = 2
        self.http_timeout = 3
        self.network_chunk_size = 65536
        self.disk_chunk_size = 4096
        self._diskfile_mgr = mock.MagicMock()

class NullBufferedHTTPConnection(object):
    # No-op stand-in for bufferedhttp.BufferedHTTPConnection; individual
    # tests subclass it and override just the method under test.

    def __init__(*args, **kwargs):
        pass

    def putrequest(*args, **kwargs):
        pass

    def putheader(*args, **kwargs):
        pass

    def endheaders(*args, **kwargs):
        pass

    def getresponse(*args, **kwargs):
        pass

class FakeResponse(object):
    # Wraps chunk_body as a single HTTP chunk (hex length, CRLF, payload,
    # CRLF) followed by the terminating zero-length chunk.

    def __init__(self, chunk_body=''):
        self.status = 200
        self.close_called = False
        if chunk_body:
            self.fp = StringIO.StringIO(
                '%x\r\n%s\r\n0\r\n\r\n' % (len(chunk_body), chunk_body))

    def close(self):
        self.close_called = True

class FakeConnection(object):

    def __init__(self):
        self.sent = []
        self.closed = False

    def send(self, data):
        self.sent.append(data)

    def close(self):
        self.closed = True

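# The tests below follow Sender's phases in order: __call__ drives connect,
# missing_check, updates, and disconnect (test_call_calls_others pins down
# that sequence), and each phase then gets its own group of tests.
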
class TestSender(unittest.TestCase):

    def setUp(self):
        self.replicator = FakeReplicator()
        self.sender = ssync_sender.Sender(self.replicator, None, None, None)

    def test_call_catches_MessageTimeout(self):

        def connect(self):
            exc = exceptions.MessageTimeout(1, 'test connect')
            # Cancels Eventlet's raising of this since we're about to do it.
            exc.cancel()
            raise exc

        with mock.patch.object(ssync_sender.Sender, 'connect', connect):
            node = dict(ip='1.2.3.4', port=5678, device='sda1')
            job = dict(partition='9')
            self.sender = ssync_sender.Sender(self.replicator, node, job,
                                              None)
            self.sender.suffixes = ['abc']
            self.assertFalse(self.sender())
        call = self.replicator.logger.error.mock_calls[0]
        self.assertEqual(
            call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
        self.assertEqual(str(call[1][-1]), '1 second: test connect')

    def test_call_catches_ReplicationException(self):

        def connect(self):
            raise exceptions.ReplicationException('test connect')

        with mock.patch.object(ssync_sender.Sender, 'connect', connect):
            node = dict(ip='1.2.3.4', port=5678, device='sda1')
            job = dict(partition='9')
            self.sender = ssync_sender.Sender(self.replicator, node, job,
                                              None)
            self.sender.suffixes = ['abc']
            self.assertFalse(self.sender())
        call = self.replicator.logger.error.mock_calls[0]
        self.assertEqual(
            call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
        self.assertEqual(str(call[1][-1]), 'test connect')

    def test_call_catches_other_exceptions(self):
        node = dict(ip='1.2.3.4', port=5678, device='sda1')
        job = dict(partition='9')
        self.sender = ssync_sender.Sender(self.replicator, node, job, None)
        self.sender.suffixes = ['abc']
        self.sender.connect = 'cause exception'
        self.assertFalse(self.sender())
        call = self.replicator.logger.exception.mock_calls[0]
        self.assertEqual(
            call[1],
            ('%s:%s/%s/%s EXCEPTION in replication.Sender', '1.2.3.4', 5678,
             'sda1', '9'))

    def test_call_catches_exception_handling_exception(self):
        node = dict(ip='1.2.3.4', port=5678, device='sda1')
        job = None  # Will cause inside exception handler to fail
        self.sender = ssync_sender.Sender(self.replicator, node, job, None)
        self.sender.suffixes = ['abc']
        self.sender.connect = 'cause exception'
        self.assertFalse(self.sender())
        self.replicator.logger.exception.assert_called_once_with(
            'EXCEPTION in replication.Sender')

    def test_call_calls_others(self):
        self.sender.suffixes = ['abc']
        self.sender.connect = mock.MagicMock()
        self.sender.missing_check = mock.MagicMock()
        self.sender.updates = mock.MagicMock()
        self.sender.disconnect = mock.MagicMock()
        self.assertTrue(self.sender())
        self.sender.connect.assert_called_once_with()
        self.sender.missing_check.assert_called_once_with()
        self.sender.updates.assert_called_once_with()
        self.sender.disconnect.assert_called_once_with()

    def test_call_calls_others_returns_failure(self):
        self.sender.suffixes = ['abc']
        self.sender.connect = mock.MagicMock()
        self.sender.missing_check = mock.MagicMock()
        self.sender.updates = mock.MagicMock()
        self.sender.disconnect = mock.MagicMock()
        self.sender.failures = 1
        self.assertFalse(self.sender())
        self.sender.connect.assert_called_once_with()
        self.sender.missing_check.assert_called_once_with()
        self.sender.updates.assert_called_once_with()
        self.sender.disconnect.assert_called_once_with()

    def test_connect_send_timeout(self):
        self.replicator.conn_timeout = 0.01
        node = dict(ip='1.2.3.4', port=5678, device='sda1')
        job = dict(partition='9')
        self.sender = ssync_sender.Sender(self.replicator, node, job, None)
        self.sender.suffixes = ['abc']

        def putrequest(*args, **kwargs):
            eventlet.sleep(0.1)

        with mock.patch.object(
                ssync_sender.bufferedhttp.BufferedHTTPConnection,
                'putrequest', putrequest):
            self.assertFalse(self.sender())
        call = self.replicator.logger.error.mock_calls[0]
        self.assertEqual(
            call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
        self.assertEqual(str(call[1][-1]), '0.01 seconds: connect send')

    def test_connect_receive_timeout(self):
        self.replicator.node_timeout = 0.02
        node = dict(ip='1.2.3.4', port=5678, device='sda1')
        job = dict(partition='9')
        self.sender = ssync_sender.Sender(self.replicator, node, job, None)
        self.sender.suffixes = ['abc']

        class FakeBufferedHTTPConnection(NullBufferedHTTPConnection):

            def getresponse(*args, **kwargs):
                eventlet.sleep(0.1)

        with mock.patch.object(
                ssync_sender.bufferedhttp, 'BufferedHTTPConnection',
                FakeBufferedHTTPConnection):
            self.assertFalse(self.sender())
        call = self.replicator.logger.error.mock_calls[0]
        self.assertEqual(
            call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
        self.assertEqual(str(call[1][-1]), '0.02 seconds: connect receive')

    def test_connect_bad_status(self):
        self.replicator.node_timeout = 0.02
        node = dict(ip='1.2.3.4', port=5678, device='sda1')
        job = dict(partition='9')
        self.sender = ssync_sender.Sender(self.replicator, node, job, None)
        self.sender.suffixes = ['abc']

        class FakeBufferedHTTPConnection(NullBufferedHTTPConnection):
            def getresponse(*args, **kwargs):
                response = FakeResponse()
                response.status = 503
                return response

        with mock.patch.object(
                ssync_sender.bufferedhttp, 'BufferedHTTPConnection',
                FakeBufferedHTTPConnection):
            self.assertFalse(self.sender())
        call = self.replicator.logger.error.mock_calls[0]
        self.assertEqual(
            call[1][:-1], ('%s:%s/%s/%s %s', '1.2.3.4', 5678, 'sda1', '9'))
        self.assertEqual(str(call[1][-1]), 'Expected status 200; got 503')

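    # The readline tests below exercise how the sender re-assembles CRLF-
    # terminated protocol lines from the chunked response body, carrying any
    # leftover bytes in response_buffer between calls.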
    def test_readline_newline_in_buffer(self):
        self.sender.response_buffer = 'Has a newline already.\r\nOkay.'
        self.assertEqual(self.sender.readline(), 'Has a newline already.\r\n')
        self.assertEqual(self.sender.response_buffer, 'Okay.')

    def test_readline_buffer_exceeds_network_chunk_size_somehow(self):
        self.replicator.network_chunk_size = 2
        self.sender.response_buffer = '1234567890'
        self.assertEqual(self.sender.readline(), '1234567890')
        self.assertEqual(self.sender.response_buffer, '')

    def test_readline_at_start_of_chunk(self):
        self.sender.response = FakeResponse()
        self.sender.response.fp = StringIO.StringIO('2\r\nx\n\r\n')
        self.assertEqual(self.sender.readline(), 'x\n')

    def test_readline_chunk_with_extension(self):
        self.sender.response = FakeResponse()
        self.sender.response.fp = StringIO.StringIO(
            '2 ; chunk=extension\r\nx\n\r\n')
        self.assertEqual(self.sender.readline(), 'x\n')

    def test_readline_broken_chunk(self):
        self.sender.response = FakeResponse()
        self.sender.response.fp = StringIO.StringIO('q\r\nx\n\r\n')
        self.assertRaises(
            exceptions.ReplicationException, self.sender.readline)
        self.assertTrue(self.sender.response.close_called)

    def test_readline_terminated_chunk(self):
        self.sender.response = FakeResponse()
        self.sender.response.fp = StringIO.StringIO('b\r\nnot enough')
        self.assertRaises(
            exceptions.ReplicationException, self.sender.readline)
        self.assertTrue(self.sender.response.close_called)

    def test_readline_all(self):
        self.sender.response = FakeResponse()
        self.sender.response.fp = StringIO.StringIO('2\r\nx\n\r\n0\r\n\r\n')
        self.assertEqual(self.sender.readline(), 'x\n')
        self.assertEqual(self.sender.readline(), '')
        self.assertEqual(self.sender.readline(), '')

    def test_readline_all_trailing_not_newline_termed(self):
        self.sender.response = FakeResponse()
        self.sender.response.fp = StringIO.StringIO(
            '2\r\nx\n\r\n3\r\n123\r\n0\r\n\r\n')
        self.assertEqual(self.sender.readline(), 'x\n')
        self.assertEqual(self.sender.readline(), '123')
        self.assertEqual(self.sender.readline(), '')
        self.assertEqual(self.sender.readline(), '')

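    # In the missing_check tests below, each object the sender offers goes
    # over the wire as one '<hash> <timestamp>\r\n' line (0x33 == 51 bytes),
    # bracketed by ':MISSING_CHECK: START' and ':MISSING_CHECK: END'.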
    def test_missing_check_timeout(self):
        self.sender.connection = FakeConnection()
        self.sender.connection.send = lambda d: eventlet.sleep(1)
        self.sender.daemon.node_timeout = 0.01
        self.assertRaises(exceptions.MessageTimeout, self.sender.missing_check)

    def test_missing_check_has_empty_suffixes(self):
        def yield_hashes(device, partition, suffixes=None):
            if device != 'dev' or partition != '9' or suffixes != [
                    'abc', 'def']:
                yield  # Just here to make this a generator
                raise Exception(
                    'No match for %r %r %r' % (device, partition, suffixes))

        self.sender.connection = FakeConnection()
        self.sender.job = {'device': 'dev', 'partition': '9'}
        self.sender.suffixes = ['abc', 'def']
        self.sender.response = FakeResponse(
            chunk_body=(
                ':MISSING_CHECK: START\r\n'
                ':MISSING_CHECK: END\r\n'))
        self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
        self.sender.missing_check()
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '17\r\n:MISSING_CHECK: START\r\n\r\n'
            '15\r\n:MISSING_CHECK: END\r\n\r\n')
        self.assertEqual(self.sender.send_list, [])

    def test_missing_check_has_suffixes(self):
        def yield_hashes(device, partition, suffixes=None):
            if device == 'dev' and partition == '9' and suffixes == [
                    'abc', 'def']:
                yield (
                    '/srv/node/dev/objects/9/abc/'
                    '9d41d8cd98f00b204e9800998ecf0abc',
                    '9d41d8cd98f00b204e9800998ecf0abc',
                    '1380144470.00000')
                yield (
                    '/srv/node/dev/objects/9/def/'
                    '9d41d8cd98f00b204e9800998ecf0def',
                    '9d41d8cd98f00b204e9800998ecf0def',
                    '1380144472.22222')
                yield (
                    '/srv/node/dev/objects/9/def/'
                    '9d41d8cd98f00b204e9800998ecf1def',
                    '9d41d8cd98f00b204e9800998ecf1def',
                    '1380144474.44444')
            else:
                raise Exception(
                    'No match for %r %r %r' % (device, partition, suffixes))

        self.sender.connection = FakeConnection()
        self.sender.job = {'device': 'dev', 'partition': '9'}
        self.sender.suffixes = ['abc', 'def']
        self.sender.response = FakeResponse(
            chunk_body=(
                ':MISSING_CHECK: START\r\n'
                ':MISSING_CHECK: END\r\n'))
        self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
        self.sender.missing_check()
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '17\r\n:MISSING_CHECK: START\r\n\r\n'
            '33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
            '33\r\n9d41d8cd98f00b204e9800998ecf0def 1380144472.22222\r\n\r\n'
            '33\r\n9d41d8cd98f00b204e9800998ecf1def 1380144474.44444\r\n\r\n'
            '15\r\n:MISSING_CHECK: END\r\n\r\n')
        self.assertEqual(self.sender.send_list, [])

    def test_missing_check_far_end_disconnect(self):
        def yield_hashes(device, partition, suffixes=None):
            if device == 'dev' and partition == '9' and suffixes == ['abc']:
                yield (
                    '/srv/node/dev/objects/9/abc/'
                    '9d41d8cd98f00b204e9800998ecf0abc',
                    '9d41d8cd98f00b204e9800998ecf0abc',
                    '1380144470.00000')
            else:
                raise Exception(
                    'No match for %r %r %r' % (device, partition, suffixes))

        self.sender.connection = FakeConnection()
        self.sender.job = {'device': 'dev', 'partition': '9'}
        self.sender.suffixes = ['abc']
        self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
        self.sender.response = FakeResponse(chunk_body='\r\n')
        exc = None
        try:
            self.sender.missing_check()
        except exceptions.ReplicationException as err:
            exc = err
        self.assertEqual(str(exc), 'Early disconnect')
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '17\r\n:MISSING_CHECK: START\r\n\r\n'
            '33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
            '15\r\n:MISSING_CHECK: END\r\n\r\n')

    def test_missing_check_far_end_disconnect2(self):
        def yield_hashes(device, partition, suffixes=None):
            if device == 'dev' and partition == '9' and suffixes == ['abc']:
                yield (
                    '/srv/node/dev/objects/9/abc/'
                    '9d41d8cd98f00b204e9800998ecf0abc',
                    '9d41d8cd98f00b204e9800998ecf0abc',
                    '1380144470.00000')
            else:
                raise Exception(
                    'No match for %r %r %r' % (device, partition, suffixes))

        self.sender.connection = FakeConnection()
        self.sender.job = {'device': 'dev', 'partition': '9'}
        self.sender.suffixes = ['abc']
        self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
        self.sender.response = FakeResponse(
            chunk_body=':MISSING_CHECK: START\r\n')
        exc = None
        try:
            self.sender.missing_check()
        except exceptions.ReplicationException as err:
            exc = err
        self.assertEqual(str(exc), 'Early disconnect')
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '17\r\n:MISSING_CHECK: START\r\n\r\n'
            '33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
            '15\r\n:MISSING_CHECK: END\r\n\r\n')

    def test_missing_check_far_end_unexpected(self):
        def yield_hashes(device, partition, suffixes=None):
            if device == 'dev' and partition == '9' and suffixes == ['abc']:
                yield (
                    '/srv/node/dev/objects/9/abc/'
                    '9d41d8cd98f00b204e9800998ecf0abc',
                    '9d41d8cd98f00b204e9800998ecf0abc',
                    '1380144470.00000')
            else:
                raise Exception(
                    'No match for %r %r %r' % (device, partition, suffixes))

        self.sender.connection = FakeConnection()
        self.sender.job = {'device': 'dev', 'partition': '9'}
        self.sender.suffixes = ['abc']
        self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
        self.sender.response = FakeResponse(chunk_body='OH HAI\r\n')
        exc = None
        try:
            self.sender.missing_check()
        except exceptions.ReplicationException as err:
            exc = err
        self.assertEqual(str(exc), "Unexpected response: 'OH HAI'")
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '17\r\n:MISSING_CHECK: START\r\n\r\n'
            '33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
            '15\r\n:MISSING_CHECK: END\r\n\r\n')

    def test_missing_check_send_list(self):
        def yield_hashes(device, partition, suffixes=None):
            if device == 'dev' and partition == '9' and suffixes == ['abc']:
                yield (
                    '/srv/node/dev/objects/9/abc/'
                    '9d41d8cd98f00b204e9800998ecf0abc',
                    '9d41d8cd98f00b204e9800998ecf0abc',
                    '1380144470.00000')
            else:
                raise Exception(
                    'No match for %r %r %r' % (device, partition, suffixes))

        self.sender.connection = FakeConnection()
        self.sender.job = {'device': 'dev', 'partition': '9'}
        self.sender.suffixes = ['abc']
        self.sender.response = FakeResponse(
            chunk_body=(
                ':MISSING_CHECK: START\r\n'
                '0123abc\r\n'
                ':MISSING_CHECK: END\r\n'))
        self.sender.daemon._diskfile_mgr.yield_hashes = yield_hashes
        self.sender.missing_check()
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '17\r\n:MISSING_CHECK: START\r\n\r\n'
            '33\r\n9d41d8cd98f00b204e9800998ecf0abc 1380144470.00000\r\n\r\n'
            '15\r\n:MISSING_CHECK: END\r\n\r\n')
        self.assertEqual(self.sender.send_list, ['0123abc'])

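    # The updates tests below cover the second phase: for each hash the
    # receiver requested (send_list), the sender issues a DELETE or PUT
    # subrequest between ':UPDATES: START' and ':UPDATES: END'.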
    def test_updates_timeout(self):
        self.sender.connection = FakeConnection()
        self.sender.connection.send = lambda d: eventlet.sleep(1)
        self.sender.daemon.node_timeout = 0.01
        self.assertRaises(exceptions.MessageTimeout, self.sender.updates)

    def test_updates_empty_send_list(self):
        self.sender.connection = FakeConnection()
        self.sender.send_list = []
        self.sender.response = FakeResponse(
            chunk_body=(
                ':UPDATES: START\r\n'
                ':UPDATES: END\r\n'))
        self.sender.updates()
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '11\r\n:UPDATES: START\r\n\r\n'
            'f\r\n:UPDATES: END\r\n\r\n')

    def test_updates_unexpected_response_lines1(self):
        self.sender.connection = FakeConnection()
        self.sender.send_list = []
        self.sender.response = FakeResponse(
            chunk_body=(
                'abc\r\n'
                ':UPDATES: START\r\n'
                ':UPDATES: END\r\n'))
        exc = None
        try:
            self.sender.updates()
        except exceptions.ReplicationException as err:
            exc = err
        self.assertEqual(str(exc), "Unexpected response: 'abc'")
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '11\r\n:UPDATES: START\r\n\r\n'
            'f\r\n:UPDATES: END\r\n\r\n')

    def test_updates_unexpected_response_lines2(self):
        self.sender.connection = FakeConnection()
        self.sender.send_list = []
        self.sender.response = FakeResponse(
            chunk_body=(
                ':UPDATES: START\r\n'
                'abc\r\n'
                ':UPDATES: END\r\n'))
        exc = None
        try:
            self.sender.updates()
        except exceptions.ReplicationException as err:
            exc = err
        self.assertEqual(str(exc), "Unexpected response: 'abc'")
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '11\r\n:UPDATES: START\r\n\r\n'
            'f\r\n:UPDATES: END\r\n\r\n')

    def test_updates_is_deleted(self):
        self.sender.connection = FakeConnection()
        self.sender.job = {'device': 'dev', 'partition': '9'}
        self.sender.node = {}
        self.sender.send_list = ['0123abc']
        self.sender.send_delete = mock.MagicMock()
        self.sender.send_put = mock.MagicMock()
        self.sender.response = FakeResponse(
            chunk_body=(
                ':UPDATES: START\r\n'
                ':UPDATES: END\r\n'))
        df = self.sender.daemon._diskfile_mgr.get_diskfile_from_hash()
        df.account = 'a'
        df.container = 'c'
        df.obj = 'o'
        dfdel = exceptions.DiskFileDeleted()
        dfdel.timestamp = '1381679759.90941'
        df.open.side_effect = dfdel
        self.sender.updates()
        self.sender.send_delete.assert_called_once_with(
            '/a/c/o', '1381679759.90941')
        self.assertEqual(self.sender.send_put.mock_calls, [])
        # note that the delete line isn't actually sent since we mock
        # send_delete; send_delete is tested separately.
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '11\r\n:UPDATES: START\r\n\r\n'
            'f\r\n:UPDATES: END\r\n\r\n')

    def test_updates_put(self):
        self.sender.connection = FakeConnection()
        self.sender.job = {'device': 'dev', 'partition': '9'}
        self.sender.node = {}
        self.sender.send_list = ['0123abc']
        df = mock.MagicMock()
        df.get_metadata.return_value = {'Content-Length': 123}
        self.sender.send_delete = mock.MagicMock()
        self.sender.send_put = mock.MagicMock()
        self.sender.response = FakeResponse(
            chunk_body=(
                ':UPDATES: START\r\n'
                ':UPDATES: END\r\n'))
        df = self.sender.daemon._diskfile_mgr.get_diskfile_from_hash()
        df.account = 'a'
        df.container = 'c'
        df.obj = 'o'
        self.sender.updates()
        self.assertEqual(self.sender.send_delete.mock_calls, [])
        self.sender.send_put.assert_called_once_with('/a/c/o', df)
        # note that the put line isn't actually sent since we mock send_put;
        # send_put is tested separately.
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '11\r\n:UPDATES: START\r\n\r\n'
            'f\r\n:UPDATES: END\r\n\r\n')

    def test_updates_read_response_timeout_start(self):
        self.sender.connection = FakeConnection()
        self.sender.send_list = []
        self.sender.response = FakeResponse(
            chunk_body=(
                ':UPDATES: START\r\n'
                ':UPDATES: END\r\n'))
        orig_readline = self.sender.readline

        def delayed_readline():
            eventlet.sleep(1)
            return orig_readline()

        self.sender.readline = delayed_readline
        self.sender.daemon.http_timeout = 0.01
        self.assertRaises(exceptions.MessageTimeout, self.sender.updates)

    def test_updates_read_response_disconnect_start(self):
        self.sender.connection = FakeConnection()
        self.sender.send_list = []
        self.sender.response = FakeResponse(chunk_body='\r\n')
        exc = None
        try:
            self.sender.updates()
        except exceptions.ReplicationException as err:
            exc = err
        self.assertEqual(str(exc), 'Early disconnect')
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '11\r\n:UPDATES: START\r\n\r\n'
            'f\r\n:UPDATES: END\r\n\r\n')

    def test_updates_read_response_unexp_start(self):
        self.sender.connection = FakeConnection()
        self.sender.send_list = []
        self.sender.response = FakeResponse(
            chunk_body=(
                'anything else\r\n'
                ':UPDATES: START\r\n'
                ':UPDATES: END\r\n'))
        exc = None
        try:
            self.sender.updates()
        except exceptions.ReplicationException as err:
            exc = err
        self.assertEqual(str(exc), "Unexpected response: 'anything else'")
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '11\r\n:UPDATES: START\r\n\r\n'
            'f\r\n:UPDATES: END\r\n\r\n')

    def test_updates_read_response_timeout_end(self):
        self.sender.connection = FakeConnection()
        self.sender.send_list = []
        self.sender.response = FakeResponse(
            chunk_body=(
                ':UPDATES: START\r\n'
                ':UPDATES: END\r\n'))
        orig_readline = self.sender.readline

        def delayed_readline():
            rv = orig_readline()
            if rv == ':UPDATES: END\r\n':
                eventlet.sleep(1)
            return rv

        self.sender.readline = delayed_readline
        self.sender.daemon.http_timeout = 0.01
        self.assertRaises(exceptions.MessageTimeout, self.sender.updates)

    def test_updates_read_response_disconnect_end(self):
        self.sender.connection = FakeConnection()
        self.sender.send_list = []
        self.sender.response = FakeResponse(
            chunk_body=(
                ':UPDATES: START\r\n'
                '\r\n'))
        exc = None
        try:
            self.sender.updates()
        except exceptions.ReplicationException as err:
            exc = err
        self.assertEqual(str(exc), 'Early disconnect')
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '11\r\n:UPDATES: START\r\n\r\n'
            'f\r\n:UPDATES: END\r\n\r\n')

    def test_updates_read_response_unexp_end(self):
        self.sender.connection = FakeConnection()
        self.sender.send_list = []
        self.sender.response = FakeResponse(
            chunk_body=(
                ':UPDATES: START\r\n'
                'anything else\r\n'
                ':UPDATES: END\r\n'))
        exc = None
        try:
            self.sender.updates()
        except exceptions.ReplicationException as err:
            exc = err
        self.assertEqual(str(exc), "Unexpected response: 'anything else'")
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '11\r\n:UPDATES: START\r\n\r\n'
            'f\r\n:UPDATES: END\r\n\r\n')

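    # In the send_delete/send_put tests below, each subrequest is framed as
    # a single chunk whose hex length prefix covers the whole header block:
    # 0x30 == 48 bytes for the DELETE line plus headers, 0x82 == 130 bytes
    # for the PUT's; the PUT body then follows as separate chunks.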
    def test_send_delete_timeout(self):
        self.sender.connection = FakeConnection()
        self.sender.connection.send = lambda d: eventlet.sleep(1)
        self.sender.daemon.node_timeout = 0.01
        exc = None
        try:
            self.sender.send_delete('/a/c/o', '1381679759.90941')
        except exceptions.MessageTimeout as err:
            exc = err
        self.assertEqual(str(exc), '0.01 seconds: send_delete')

    def test_send_delete(self):
        self.sender.connection = FakeConnection()
        self.sender.send_delete('/a/c/o', '1381679759.90941')
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '30\r\n'
            'DELETE /a/c/o\r\n'
            'X-Timestamp: 1381679759.90941\r\n'
            '\r\n\r\n')

    def test_send_put_initial_timeout(self):
        self.sender.connection = FakeConnection()
        df = mock.MagicMock()
        self.sender.connection.send = lambda d: eventlet.sleep(1)
        self.sender.daemon.node_timeout = 0.01
        df.get_metadata.return_value = {
            'name': '/a/c/o',
            'X-Timestamp': '1381679759.90941',
            'Content-Length': '3',
            'Etag': '900150983cd24fb0d6963f7d28e17f72',
            'Some-Other-Header': 'value'}
        df.content_length = 3
        df.__iter__ = lambda x: iter(['ab', 'c'])
        exc = None
        try:
            self.sender.send_put('/a/c/o', df)
        except exceptions.MessageTimeout as err:
            exc = err
        self.assertEqual(str(exc), '0.01 seconds: send_put')

    def test_send_put_chunk_timeout(self):
        self.sender.connection = FakeConnection()
        df = mock.MagicMock()
        self.sender.daemon.node_timeout = 0.01
        df.get_metadata.return_value = {
            'name': '/a/c/o',
            'X-Timestamp': '1381679759.90941',
            'Content-Length': '3',
            'Etag': '900150983cd24fb0d6963f7d28e17f72',
            'Some-Other-Header': 'value'}

        def iterator(dontcare):
            self.sender.connection.send = lambda d: eventlet.sleep(1)
            return iter(['ab', 'c'])

        df.content_length = 3
        df.reader().__iter__ = iterator
        exc = None
        try:
            self.sender.send_put('/a/c/o', df)
        except exceptions.MessageTimeout as err:
            exc = err
        self.assertEqual(str(exc), '0.01 seconds: send_put chunk')

    def test_send_put(self):
        self.sender.connection = FakeConnection()
        df = mock.MagicMock()
        df.get_metadata.return_value = {
            'name': '/a/c/o',
            'X-Timestamp': '1381679759.90941',
            'Content-Length': '3',
            'Etag': '900150983cd24fb0d6963f7d28e17f72',
            'Some-Other-Header': 'value'}
        df.content_length = 3
        df.reader().__iter__ = lambda x: iter(['ab', 'c'])
        self.sender.send_put('/a/c/o', df)
        self.assertEqual(
            ''.join(self.sender.connection.sent),
            '82\r\n'
            'PUT /a/c/o\r\n'
            'Content-Length: 3\r\n'
            'Etag: 900150983cd24fb0d6963f7d28e17f72\r\n'
            'Some-Other-Header: value\r\n'
            'X-Timestamp: 1381679759.90941\r\n'
            '\r\n'
            '\r\n'
            '2\r\n'
            'ab\r\n'
            '1\r\n'
            'c\r\n')

    def test_disconnect_timeout(self):
        self.sender.connection = FakeConnection()
        self.sender.connection.send = lambda d: eventlet.sleep(1)
        self.sender.daemon.node_timeout = 0.01
        self.sender.disconnect()
        self.assertEqual(''.join(self.sender.connection.sent), '')
        self.assertTrue(self.sender.connection.closed)

    def test_disconnect(self):
        self.sender.connection = FakeConnection()
        self.sender.disconnect()
        self.assertEqual(''.join(self.sender.connection.sent), '0\r\n\r\n')
        self.assertTrue(self.sender.connection.closed)


if __name__ == '__main__':
    unittest.main()