@@ -259,7 +259,7 @@ def __init__(self, conf):
         self.log_requests = conf.get('log_requests', 't')[:1].lower() == 't'
         self.max_upload_time = int(conf.get('max_upload_time', 86400))
         self.slow = int(conf.get('slow', 0))
-        self.chunks_per_sync = int(conf.get('chunks_per_sync', 8000))
+        self.bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024

     def container_update(self, op, account, container, obj, headers_in,
                          headers_out, objdevice):
@@ -359,11 +359,10 @@ def PUT(self, request):
         upload_expiration = time.time() + self.max_upload_time
         etag = md5()
         upload_size = 0
+        last_sync = 0
         with file.mkstemp() as (fd, tmppath):
             if 'content-length' in request.headers:
                 fallocate(fd, int(request.headers['content-length']))
-            chunk_count = 0
-            dropped_cache = 0
             for chunk in iter(lambda: request.body_file.read(
                         self.network_chunk_size), ''):
                 upload_size += len(chunk)
@@ -373,13 +372,11 @@ def PUT(self, request):
                 while chunk:
                     written = os.write(fd, chunk)
                     chunk = chunk[written:]
-                chunk_count += 1
                 # For large files sync every 512MB (by default) written
-                if chunk_count % self.chunks_per_sync == 0:
+                if upload_size - last_sync >= self.bytes_per_sync:
                     os.fdatasync(fd)
-                    drop_buffer_cache(fd, dropped_cache,
-                                      upload_size - dropped_cache)
-                    dropped_cache = upload_size
+                    drop_buffer_cache(fd, last_sync, upload_size - last_sync)
+                    last_sync = upload_size

             if 'content-length' in request.headers and \
                     int(request.headers['content-length']) != upload_size:
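For reference, here is a minimal, self-contained sketch of the pattern the new code follows: write incoming chunks to a file descriptor and, every bytes_per_sync bytes, flush with fdatasync() and drop the already-synced range from the page cache so large uploads don't evict hotter data. The helper name write_with_periodic_sync is hypothetical, and os.posix_fadvise(POSIX_FADV_DONTNEED) stands in for Swift's drop_buffer_cache() utility; this is not the Swift code itself (Linux, Python 3.3+ assumed).

import os

def write_with_periodic_sync(fd, chunks, bytes_per_sync=512 * 1024 * 1024):
    """Write chunks to fd, syncing and dropping cache every bytes_per_sync."""
    upload_size = 0
    last_sync = 0
    for chunk in chunks:
        upload_size += len(chunk)
        while chunk:  # os.write may accept fewer bytes than offered
            written = os.write(fd, chunk)
            chunk = chunk[written:]
        if upload_size - last_sync >= bytes_per_sync:
            os.fdatasync(fd)  # flush file data (not metadata) to disk
            # Advise the kernel to evict the just-synced byte range from the
            # page cache, mirroring what drop_buffer_cache() does in Swift.
            os.posix_fadvise(fd, last_sync, upload_size - last_sync,
                             os.POSIX_FADV_DONTNEED)
            last_sync = upload_size
    return upload_size

# Config handling mirrors the __init__ change above: the option is given in
# megabytes (mb_per_sync) and converted once to a byte threshold.
conf = {'mb_per_sync': '512'}
bytes_per_sync = int(conf.get('mb_per_sync', 512)) * 1024 * 1024

Tracking bytes written instead of chunk counts keeps the sync interval fixed at roughly 512MB regardless of the network chunk size, which is the point of the change.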