[project @ Arch-1:robey@lag.net--2003-public%secsh--dev--1.0--patch-153]

tweak sftp_file write behavior on large blocks of data

BufferedFile.write() wasn't correctly dealing with the possibility that the
underlying write might not write the entire data block at once (even though
the docs said it would).  now that it's working, make sftp_file take
advantage of it in order to chop up blocks larger than 32kB (the max allowed
on sftp) and add a unit test for it.
Robey Pointer 2005-02-28 07:49:56 +00:00
parent 2746d44906
commit 7490172401
3 changed files with 36 additions and 19 deletions
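
For reference, here is a minimal standalone sketch of the write loop the commit message describes (hypothetical names such as ChunkedWriter and MAX_CHUNK; this is not paramiko's actual API): the low-level _write may accept less data than it was handed -- here capped at 32 kB, like an SFTP write request -- and the caller keeps looping, advancing its position by however many bytes were actually written each time.

    # standalone sketch only; names are made up for illustration
    MAX_CHUNK = 32 * 1024   # assumed per-request limit, analogous to MAX_REQUEST_SIZE

    class ChunkedWriter(object):
        def __init__(self):
            self.pos = 0
            self.chunks = []

        def _write(self, data):
            # partial write: accept at most MAX_CHUNK bytes and report how many
            chunk = min(len(data), MAX_CHUNK)
            self.chunks.append(data[:chunk])
            return chunk

        def write_all(self, data):
            # keep calling _write until all data is accepted, updating the
            # position after every partial write (the BufferedFile fix below)
            while len(data) > 0:
                count = self._write(data)
                data = data[count:]
                self.pos += count

    w = ChunkedWriter()
    w.write_all('x' * (1024 * 1024))   # 1 MB block, like the new unit test
    print(len(w.chunks), w.pos)        # 32 chunks of 32 kB; pos == 1048576
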

View File

@@ -314,7 +314,7 @@ class BufferedFile (object):
                 last_newline_pos += len(wbuf) - len(data)
                 self._write_all(wbuf[:last_newline_pos + 1])
                 self._wbuffer = StringIO()
-                self._wbuffer.write(wbuf[last_newline_pos+1:])
+                self._wbuffer.write(wbuf[last_newline_pos + 1:])
             return
         # even if we're line buffering, if the buffer has grown past the
         # buffer size, force a flush.
@@ -412,16 +412,15 @@ class BufferedFile (object):
     def _write_all(self, data):
         # the underlying stream may be something that does partial writes (like
         # a socket).
-        total = len(data)
-        while data:
+        while len(data) > 0:
             count = self._write(data)
             data = data[count:]
-        if self._flags & _FLAG_APPEND:
-            self._size += total
-            self._pos = self._realpos = self._size
-        else:
-            self._pos += total
-            self._realpos += total
+            if self._flags & _FLAG_APPEND:
+                self._size += count
+                self._pos = self._realpos = self._size
+            else:
+                self._pos += count
+                self._realpos += count
         return None
 
     def _record_newline(self, newline):

View File

@@ -1,6 +1,6 @@
 #!/usr/bin/python
 
-# Copyright (C) 2003-2004 Robey Pointer <robey@lag.net>
+# Copyright (C) 2003-2005 Robey Pointer <robey@lag.net>
 #
 # This file is part of paramiko.
 #
@@ -55,13 +55,14 @@ class SFTPFile (BufferedFile):
         return msg.get_string()
 
     def _write(self, data):
-        offset = 0
-        while offset < len(data):
-            chunk = min(len(data) - offset, self.MAX_REQUEST_SIZE)
-            t, msg = self.sftp._request(CMD_WRITE, self.handle, long(self._realpos + offset),
-                                        str(data[offset : offset + chunk]))
-            offset += chunk
-        return len(data)
+        # may write less than requested if it would exceed max packet size
+        chunk = min(len(data), self.MAX_REQUEST_SIZE)
+        t, msg = self.sftp._request(CMD_WRITE, self.handle, long(self._realpos),
+                                    str(data[:chunk]))
+        if t != CMD_STATUS:
+            raise SFTPError('Expected status')
+        self.sftp._convert_status(msg)
+        return chunk
 
     def settimeout(self, timeout):
         """

View File

@@ -438,7 +438,24 @@ class SFTPTest (unittest.TestCase):
         finally:
             sftp.remove('%s/hongry.txt' % FOLDER)
 
-    def test_E_realpath(self):
+    def test_E_big_file_no_buffer(self):
+        """
+        write a 1MB file, with no linefeeds, and no buffering.
+        """
+        global g_big_file_test
+        if not g_big_file_test:
+            return
+        kblob = (1024 * 1024 * 'x')
+        try:
+            f = sftp.open('%s/hongry.txt' % FOLDER, 'w')
+            f.write(kblob)
+            f.close()
+            self.assertEqual(sftp.stat('%s/hongry.txt' % FOLDER).st_size, 1024 * 1024)
+        finally:
+            sftp.remove('%s/hongry.txt' % FOLDER)
+
+    def test_F_realpath(self):
         """
         test that realpath is returning something non-empty and not an
         error.
@@ -449,7 +466,7 @@ class SFTPTest (unittest.TestCase):
         self.assert_(len(f) > 0)
         self.assertEquals(os.path.join(pwd, FOLDER), f)
 
-    def test_F_mkdir(self):
+    def test_G_mkdir(self):
         """
         verify that mkdir/rmdir work.
         """