Merge "Added per disk PUT timing monitoring support."
This commit is contained in:
commit
28ac46ded3
@@ -756,6 +756,9 @@ Metric Name                           Description
 `object-server.PUT.timeouts`          Count of object PUTs which exceeded max_upload_time.
 `object-server.PUT.timing`            Timing data for each PUT request not resulting in an
                                       error.
+`object-server.PUT.<device>.timing`   Timing data per kB transferred (ms/kB) for each
+                                      non-zero-byte PUT request on each device. Useful
+                                      for monitoring problematic devices; higher is bad.
 `object-server.GET.errors.timing`     Timing data for GET request errors: bad request,
                                       not mounted, header timestamps before the epoch,
                                       precondition failed.
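
To make the new per-device metric concrete, its value is the upload's
wall-clock time converted to milliseconds divided by the kilobytes
transferred. A quick worked example (the object sizes and timings below are
hypothetical, chosen only to illustrate the unit):

    # 4 MB object written in 0.8 s:
    #   0.8 s * 1000 = 800 ms;  4,000,000 bytes / 1000 = 4,000 kB
    print(0.8 * 1000 / (4000000 / 1000.0))   # 0.2 ms/kB
    # the same object taking 8 s on a struggling disk:
    print(8.0 * 1000 / (4000000 / 1000.0))   # 2.0 ms/kB, ten times worse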
@@ -492,6 +492,12 @@ class StatsdClient(object):
         return self.timing(metric, (time.time() - orig_time) * 1000,
                            sample_rate)
 
+    def transfer_rate(self, metric, elapsed_time, byte_xfer, sample_rate=None):
+        if byte_xfer:
+            return self.timing(metric,
+                               elapsed_time * 1000 / byte_xfer * 1000,
+                               sample_rate)
+
 
 def timing_stats(**dec_kwargs):
     """
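
The arithmetic in transfer_rate depends on left-to-right evaluation:
elapsed_time * 1000 / byte_xfer * 1000 parses as
(elapsed_time * 1000 / byte_xfer) * 1000, i.e. milliseconds per 1000 bytes
(ms/kB). A minimal sketch of the call shape (FakeStatsd and the sample
numbers are hypothetical; only the signatures mirror the patch):

    class FakeStatsd(object):
        """Stand-in for StatsdClient, to show what transfer_rate emits."""

        def timing(self, metric, timing_ms, sample_rate=None):
            print('%s: %.3f ms/kB' % (metric, timing_ms))

        def transfer_rate(self, metric, elapsed_time, byte_xfer,
                          sample_rate=None):
            if byte_xfer:  # skip zero-byte uploads; avoids division by zero
                return self.timing(metric,
                                   elapsed_time * 1000 / byte_xfer * 1000,
                                   sample_rate)

    # a 64 KiB body that took 20 ms to hash and write:
    FakeStatsd().transfer_rate('PUT.sda1.timing', 0.020, 65536)
    # -> PUT.sda1.timing: 0.305 ms/kB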
@@ -646,6 +652,7 @@ class LogAdapter(logging.LoggerAdapter, object):
     decrement = statsd_delegate('decrement')
     timing = statsd_delegate('timing')
     timing_since = statsd_delegate('timing_since')
+    transfer_rate = statsd_delegate('transfer_rate')
 
 
 class SwiftLogFormatter(logging.Formatter):
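
The new transfer_rate entry reuses the statsd_delegate factory defined
earlier in utils.py, which is outside this diff. A minimal sketch of the
delegation pattern it presumably implements (an assumption for illustration,
not the verbatim Swift code):

    def statsd_delegate(statsd_func_name):
        """Build a LogAdapter method that forwards to the attached
        StatsdClient, or silently no-ops when statsd is not configured.
        (Sketch; the real factory lives outside this diff.)"""
        def dispatch(self, *args, **kwargs):
            statsd_client = getattr(self.logger, 'statsd_client', None)
            if statsd_client:
                return getattr(statsd_client,
                               statsd_func_name)(*args, **kwargs)
        return dispatch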
@@ -643,6 +643,7 @@ class ObjectController(object):
         etag = md5()
         upload_size = 0
         last_sync = 0
+        elapsed_time = 0
         with file.mkstemp() as fd:
             try:
                 fallocate(fd, int(request.headers.get('content-length', 0)))
@@ -650,6 +651,7 @@ class ObjectController(object):
                 return HTTPInsufficientStorage(drive=device, request=request)
             reader = request.environ['wsgi.input'].read
             for chunk in iter(lambda: reader(self.network_chunk_size), ''):
+                start_time = time.time()
                 upload_size += len(chunk)
                 if time.time() > upload_expiration:
                     self.logger.increment('PUT.timeouts')
@@ -664,6 +666,11 @@ class ObjectController(object):
                     drop_buffer_cache(fd, last_sync, upload_size - last_sync)
                     last_sync = upload_size
                     sleep()
+                elapsed_time += time.time() - start_time
+
+            if upload_size:
+                self.logger.transfer_rate(
+                    'PUT.' + device + '.timing', elapsed_time, upload_size)
             if 'content-length' in request.headers and \
                     int(request.headers['content-length']) != upload_size:
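
Taken together, the object-server changes implement a per-chunk stopwatch:
the timer starts after a chunk has been read off the wire and stops once it
has been counted, hashed, written, and (periodically) synced, so time spent
waiting for the next network read is excluded from elapsed_time. A
self-contained sketch of the same pattern (timed_copy, reader, and writer
are hypothetical names, not Swift's actual WSGI plumbing):

    import io
    import time

    def timed_copy(reader, writer, chunk_size=65536):
        """Copy a stream chunk by chunk, accumulating only the time spent
        processing each chunk, as in the patched ObjectController.PUT."""
        upload_size = 0
        elapsed_time = 0
        for chunk in iter(lambda: reader(chunk_size), b''):
            start_time = time.time()  # the chunk is already in memory here
            upload_size += len(chunk)
            writer(chunk)
            elapsed_time += time.time() - start_time
        return upload_size, elapsed_time

    src = io.BytesIO(b'x' * 1000000)
    size, elapsed = timed_copy(src.read, lambda chunk: None)
    # feed the totals to the new helper, e.g.:
    # self.logger.transfer_rate('PUT.' + device + '.timing', elapsed, size)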