[project @ Arch-1:robey@lag.net--2003-public%secsh--dev--1.0--patch-27]

add BufferedFile abstraction
SFTP client mode is mostly functional.  there are probably still some bugs
but most of the operations on "file" objects have survived my simple tests.

BufferedFile wraps a simpler stream in something that looks like a python
file (and can even handle seeking if the stream underneath supports it).
it's meant to be subclassed.  most of it is ripped out of what used to be
ChannelFile so i can reuse it for sftp -- ChannelFile is now tiny.

SFTP and Message are now exported.

fixed util.format_binary_line to not quote spaces.
Robey Pointer 2004-03-04 08:21:45 +00:00
parent d599570905
commit 3e31771637
7 changed files with 972 additions and 152 deletions
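To make the new abstraction concrete: a rough sketch of the subclassing pattern the commit message describes (and which the ChannelFile and SFTPFile classes below follow) would wrap a raw byte stream by overriding _read/_write and calling _set_mode. The SocketFile class here is illustrative only, not part of the commit, and assumes the new module is importable as paramiko.file:

# Hypothetical subclass wrapping a plain socket in the BufferedFile interface.
# Buffering, readline(), and iteration all come from the base class in
# paramiko/file.py; only the raw-stream hooks are supplied here.
from paramiko.file import BufferedFile

class SocketFile (BufferedFile):
    def __init__(self, sock, mode='r', bufsize=-1):
        BufferedFile.__init__(self)
        self._sock = sock
        self._set_mode(mode, bufsize)

    def _read(self, size):
        data = self._sock.recv(size)
        if len(data) == 0:
            return None      # returning None (or raising EOFError) signals EOF
        return data

    def _write(self, data):
        # may be a partial write; BufferedFile._write_all loops until done
        return self._sock.send(data)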

demo_sftp.py  (new executable file, 157 lines)

@@ -0,0 +1,157 @@
#!/usr/bin/python
import sys, os, socket, threading, getpass, logging, time, base64, select, termios, tty, traceback
import paramiko
##### utility functions
def load_host_keys():
filename = os.environ['HOME'] + '/.ssh/known_hosts'
keys = {}
try:
f = open(filename, 'r')
except Exception, e:
print '*** Unable to open host keys file (%s)' % filename
return
for line in f:
keylist = line.split(' ')
if len(keylist) != 3:
continue
hostlist, keytype, key = keylist
hosts = hostlist.split(',')
for host in hosts:
if not keys.has_key(host):
keys[host] = {}
keys[host][keytype] = base64.decodestring(key)
f.close()
return keys
##### main demo
# setup logging
l = logging.getLogger("paramiko")
l.setLevel(logging.DEBUG)
if len(l.handlers) == 0:
f = open('demo.log', 'w')
lh = logging.StreamHandler(f)
lh.setFormatter(logging.Formatter('%(levelname)-.3s [%(asctime)s] %(name)s: %(message)s', '%Y%m%d:%H%M%S'))
l.addHandler(lh)
username = ''
if len(sys.argv) > 1:
hostname = sys.argv[1]
if hostname.find('@') >= 0:
username, hostname = hostname.split('@')
else:
hostname = raw_input('Hostname: ')
if len(hostname) == 0:
print '*** Hostname required.'
sys.exit(1)
port = 22
if hostname.find(':') >= 0:
hostname, portstr = hostname.split(':')
port = int(portstr)
# now connect
try:
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.connect((hostname, port))
except Exception, e:
print '*** Connect failed: ' + str(e)
traceback.print_exc()
sys.exit(1)
try:
event = threading.Event()
t = paramiko.Transport(sock)
t.start_client(event)
# print repr(t)
event.wait(15)
if not t.is_active():
print '*** SSH negotiation failed.'
sys.exit(1)
# print repr(t)
keys = load_host_keys()
keytype, hostkey = t.get_remote_server_key()
if not keys.has_key(hostname):
print '*** WARNING: Unknown host key!'
elif not keys[hostname].has_key(keytype):
print '*** WARNING: Unknown host key!'
elif keys[hostname][keytype] != hostkey:
print '*** WARNING: Host key has changed!!!'
sys.exit(1)
else:
print '*** Host key OK.'
event.clear()
# get username
if username == '':
default_username = getpass.getuser()
username = raw_input('Username [%s]: ' % default_username)
if len(username) == 0:
username = default_username
# ask for what kind of authentication to try
default_auth = 'p'
auth = raw_input('Auth by (p)assword, (r)sa key, or (d)ss key? [%s] ' % default_auth)
if len(auth) == 0:
auth = default_auth
if auth == 'r':
key = paramiko.RSAKey()
default_path = os.environ['HOME'] + '/.ssh/id_rsa'
path = raw_input('RSA key [%s]: ' % default_path)
if len(path) == 0:
path = default_path
try:
key.read_private_key_file(path)
except paramiko.PasswordRequiredException:
password = getpass.getpass('RSA key password: ')
key.read_private_key_file(path, password)
t.auth_publickey(username, key, event)
elif auth == 'd':
key = paramiko.DSSKey()
default_path = os.environ['HOME'] + '/.ssh/id_dsa'
path = raw_input('DSS key [%s]: ' % default_path)
if len(path) == 0:
path = default_path
try:
key.read_private_key_file(path)
except paramiko.PasswordRequiredException:
password = getpass.getpass('DSS key password: ')
key.read_private_key_file(path, password)
t.auth_key(username, key, event)
else:
pw = getpass.getpass('Password for %s@%s: ' % (username, hostname))
t.auth_password(username, pw, event)
event.wait(10)
# print repr(t)
if not t.is_authenticated():
print '*** Authentication failed. :('
t.close()
sys.exit(1)
chan = t.open_session()
chan.invoke_subsystem('sftp')
print '*** SFTP...'
sftp = paramiko.SFTP(chan)
print repr(sftp.listdir('/tmp'))
chan.close()
t.close()
except Exception, e:
print '*** Caught exception: ' + str(e.__class__) + ': ' + str(e)
traceback.print_exc()
try:
t.close()
except:
pass
sys.exit(1)

paramiko/__init__.py

@@ -67,27 +67,33 @@ __version__ = "0.9-doduo"
__license__ = "GNU Lesser General Public License (LGPL)"
import transport, auth_transport, channel, rsakey, dsskey, ssh_exception
import transport, auth_transport, channel, rsakey, dsskey, message, ssh_exception, sftp
Transport = auth_transport.Transport
Channel = channel.Channel
RSAKey = rsakey.RSAKey
DSSKey = dsskey.DSSKey
SSHException = ssh_exception.SSHException
Message = message.Message
PasswordRequiredException = ssh_exception.PasswordRequiredException
SFTP = sftp.SFTP
__all__ = [ 'Transport',
'Channel',
'RSAKey',
'DSSKey',
'Message',
'SSHException',
'PasswordRequiredException',
'SFTP',
'transport',
'auth_transport',
'channel',
'rsakey',
'dsskey',
'pkey',
'message',
'ssh_exception',
'sftp',
'util' ]

paramiko/channel.py

@@ -26,6 +26,7 @@ from message import Message
from ssh_exception import SSHException
from transport import _MSG_CHANNEL_REQUEST, _MSG_CHANNEL_CLOSE, _MSG_CHANNEL_WINDOW_ADJUST, _MSG_CHANNEL_DATA, \
_MSG_CHANNEL_EOF, _MSG_CHANNEL_SUCCESS, _MSG_CHANNEL_FAILURE
from file import BufferedFile
import time, threading, logging, socket, os
from logging import DEBUG
@@ -334,7 +335,7 @@ class Channel (object):
@param nbytes: maximum number of bytes to read.
@type nbytes: int
@return: data
@return: data.
@rtype: string
@raise socket.timeout: if no data is ready before the timeout set by
@@ -834,7 +835,7 @@ class Channel (object):
self.in_window_sofar = 0
class ChannelFile (object):
class ChannelFile (BufferedFile):
"""
A file-like wrapper around L{Channel}. A ChannelFile is created by calling
L{Channel.makefile} and doesn't have the non-portable side effect of
@@ -846,28 +847,10 @@
C{ChannelFile} does nothing but flush the buffer.
"""
def __init__(self, channel, mode = "r", buf_size = -1):
def __init__(self, channel, mode = 'r', bufsize = -1):
self.channel = channel
self.mode = mode
if buf_size <= 0:
self.buf_size = 1024
self.line_buffered = 0
elif buf_size == 1:
self.buf_size = 1
self.line_buffered = 1
else:
self.buf_size = buf_size
self.line_buffered = 0
self.wbuffer = ""
self.rbuffer = ""
self.readable = ("r" in mode)
self.writable = ("w" in mode) or ("+" in mode) or ("a" in mode)
self.universal_newlines = ('U' in mode)
self.binary = ("b" in mode)
self.at_trailing_cr = False
self.name = '<file from ' + repr(self.channel) + '>'
self.newlines = None
self.softspace = False
BufferedFile.__init__(self)
self._set_mode(mode, bufsize)
def __repr__(self):
"""
@@ -877,134 +860,12 @@
"""
return '<paramiko.ChannelFile from ' + repr(self.channel) + '>'
def __iter__(self):
return self
def _read(self, size):
return self.channel.recv(size)
def next(self):
line = self.readline()
if not line:
raise StopIteration
return line
def _write(self, data):
self.channel.sendall(data)
return len(data)
def write(self, str):
if not self.writable:
raise IOError("file not open for writing")
if self.buf_size == 0 and not self.line_buffered:
self.channel.sendall(str)
return
self.wbuffer += str
if self.line_buffered:
last_newline_pos = self.wbuffer.rfind("\n")
if last_newline_pos >= 0:
self.channel.sendall(self.wbuffer[:last_newline_pos+1])
self.wbuffer = self.wbuffer[last_newline_pos+1:]
else:
if len(self.wbuffer) >= self.buf_size:
self.channel.sendall(self.wbuffer)
self.wbuffer = ""
return
def writelines(self, sequence):
for s in sequence:
self.write(s)
return
def flush(self):
self.channel.sendall(self.wbuffer)
self.wbuffer = ""
return
def read(self, size = None):
if not self.readable:
raise IOError("file not open for reading")
if size is None or size < 0:
result = self.rbuffer
self.rbuffer = ""
while not self.channel.eof_received:
new_data = self.channel.recv(65536)
if not new_data:
break
result += new_data
return result
if size <= len(self.rbuffer):
result = self.rbuffer[:size]
self.rbuffer = self.rbuffer[size:]
return result
while len(self.rbuffer) < size and not self.channel.eof_received:
new_data = self.channel.recv(max(self.buf_size, size-len(self.rbuffer)))
if not new_data:
break
self.rbuffer += new_data
result = self.rbuffer[:size]
self.rbuffer[size:]
return result
def readline(self, size=None):
line = self.rbuffer
while 1:
if self.at_trailing_cr and (len(line) > 0):
if line[0] == '\n':
line = line[1:]
self.at_trailing_cr = False
if self.universal_newlines:
if ('\n' in line) or ('\r' in line):
break
else:
if '\n' in line:
break
if size >= 0:
if len(line) >= size:
# truncate line and return
self.rbuffer = line[size:]
line = line[:size]
return line
n = size - len(line)
else:
n = 64
new_data = self.channel.recv(n)
if not new_data:
self.rbuffer = ''
return line
line += new_data
# find the newline
pos = line.find('\n')
if self.universal_newlines:
rpos = line.find('\r')
if (rpos >= 0) and ((rpos < pos) or (pos < 0)):
pos = rpos
xpos = pos + 1
if (line[pos] == '\r') and (xpos < len(line)) and (line[xpos] == '\n'):
xpos += 1
self.rbuffer = line[xpos:]
lf = line[pos:xpos]
line = line[:xpos]
if (len(self.rbuffer) == 0) and (lf == '\r'):
# we could read the line up to a '\r' and there could still be a
# '\n' following that we read next time. note that and eat it.
self.at_trailing_cr = True
# silliness about tracking what kinds of newlines we've seen
if self.newlines is None:
self.newlines = lf
elif (type(self.newlines) is str) and (self.newlines != lf):
self.newlines = (self.newlines, lf)
elif lf not in self.newlines:
self.newlines += (lf,)
return line
def readlines(self, sizehint = None):
lines = []
while 1:
line = self.readline()
if not line:
break
lines.append(line)
return lines
def xreadlines(self):
return self
def close(self):
self.flush()
return
# vim: set shiftwidth=4 expandtab :

paramiko/file.py  (new file, 428 lines)

@@ -0,0 +1,428 @@
#!/usr/bin/python
# Copyright (C) 2003-2004 Robey Pointer <robey@lag.net>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
"""
BufferedFile.
"""
_FLAG_READ = 0x1
_FLAG_WRITE = 0x2
_FLAG_APPEND = 0x4
_FLAG_BINARY = 0x10
_FLAG_BUFFERED = 0x20
_FLAG_LINE_BUFFERED = 0x40
_FLAG_UNIVERSAL_NEWLINE = 0x80
class BufferedFile (object):
"""
Reusable base class to implement python-style file buffering around a
simpler stream.
"""
_DEFAULT_BUFSIZE = 1024
SEEK_SET = 0
SEEK_CUR = 1
SEEK_END = 2
def __init__(self):
self._flags = 0
self._bufsize = self._DEFAULT_BUFSIZE
self._wbuffer = self._rbuffer = ''
self._at_trailing_cr = False
self._closed = False
# pos - position within the file, according to the user
# realpos - position according the OS
# (these may be different because we buffer for line reading)
self._pos = self._realpos = 0
def __iter__(self):
"""
Returns an iterator that can be used to iterate over the lines in this
file. This iterator happens to return the file itself, since a file is
its own iterator.
@raise ValueError: if the file is closed.
@return: an iterator.
@rtype: iterator
"""
if self._closed:
raise ValueError('I/O operation on closed file')
return self
def close(self):
"""
Close the file. Future read and write operations will fail.
"""
self.flush()
self._closed = True
def flush(self):
"""
Write out any data in the write buffer. This may do nothing if write
buffering is not turned on.
"""
self._write_all(self._wbuffer)
self._wbuffer = ''
return
def next(self):
"""
Returns the next line from the input, or raises L{StopIteration} when
EOF is hit. Unlike python file objects, it's okay to mix calls to
C{next} and L{readline}.
@raise StopIteration: when the end of the file is reached.
@return: a line read from the file.
@rtype: string
"""
line = self.readline()
if not line:
raise StopIteration
return line
def read(self, size=None):
"""
Read at most C{size} bytes from the file (less if we hit the end of the
file first). If the C{size} argument is negative or omitted, read all
the remaining data in the file.
@param size: maximum number of bytes to read.
@type size: int
@return: data read from the file, or an empty string if EOF was
encountered immediately.
@rtype: string
"""
if self._closed:
raise IOError('File is closed')
if not (self._flags & _FLAG_READ):
raise IOError('File not open for reading')
if (size is None) or (size < 0):
# go for broke
result = self._rbuffer
self._rbuffer = ''
self._pos += len(result)
while 1:
try:
new_data = self._read(self._DEFAULT_BUFSIZE)
except EOFError:
new_data = None
if (new_data is None) or (len(new_data) == 0):
break
result += new_data
self._realpos += len(new_data)
self._pos += len(new_data)
return result
if size <= len(self._rbuffer):
result = self._rbuffer[:size]
self._rbuffer = self._rbuffer[size:]
self._pos += len(result)
return result
while len(self._rbuffer) < size:
try:
new_data = self._read(max(self._bufsize, size - len(self._rbuffer)))
except EOFError:
new_data = None
if (new_data is None) or (len(new_data) == 0):
break
self._rbuffer += new_data
self._realpos += len(new_data)
result = self._rbuffer[:size]
self._rbuffer = self._rbuffer[size:]
self._pos += len(result)
return result
def readline(self, size=None):
"""
Read one entire line from the file. A trailing newline character is
kept in the string (but may be absent when a file ends with an
incomplete line). If the size argument is present and non-negative, it
is a maximum byte count (including the trailing newline) and an
incomplete line may be returned. An empty string is returned only when
EOF is encountered immediately.
@note: Unlike stdio's C{fgets()}, the returned string contains null
characters (C{'\0'}) if they occurred in the input.
@param size: maximum length of returned string.
@type size: int
@return: next line of the file, or an empty string if the end of the
file has been reached.
@rtype: string
"""
# it's almost silly how complex this function is.
if self._closed:
raise IOError('File is closed')
line = self._rbuffer
while 1:
if self._at_trailing_cr and (self._flags & _FLAG_UNIVERSAL_NEWLINE) and (len(line) > 0):
# edge case: the newline may be '\r\n' and we may have read
# only the first '\r' last time.
if line[0] == '\n':
line = line[1:]
self._record_newline('\r\n')
else:
self._record_newline('\r')
self._at_trailing_cr = False
# check size before looking for a linefeed, in case we already have
# enough.
if (size is not None) and (size >= 0):
if len(line) >= size:
# truncate line and return
self._rbuffer = line[size:]
line = line[:size]
self._pos += len(line)
return line
n = size - len(line)
else:
n = self._DEFAULT_BUFSIZE
if ('\n' in line) or ((self._flags & _FLAG_UNIVERSAL_NEWLINE) and ('\r' in line)):
break
try:
new_data = self._read(n)
except EOFError:
new_data = None
if (new_data is None) or (len(new_data) == 0):
self._rbuffer = ''
self._pos += len(line)
return line
line += new_data
self._realpos += len(new_data)
# find the newline
pos = line.find('\n')
if self._flags & _FLAG_UNIVERSAL_NEWLINE:
rpos = line.find('\r')
if (rpos >= 0) and ((rpos < pos) or (pos < 0)):
pos = rpos
xpos = pos + 1
if (line[pos] == '\r') and (xpos < len(line)) and (line[xpos] == '\n'):
xpos += 1
self._rbuffer = line[xpos:]
lf = line[pos:xpos]
line = line[:xpos]
if (len(self._rbuffer) == 0) and (lf == '\r'):
# we could read the line up to a '\r' and there could still be a
# '\n' following that we read next time. note that and eat it.
self._at_trailing_cr = True
else:
self._record_newline(lf)
self._pos += len(line)
return line
def readlines(self, sizehint=None):
"""
Read all remaining lines using L{readline} and return them as a list.
If the optional C{sizehint} argument is present, instead of reading up
to EOF, whole lines totalling approximately sizehint bytes (possibly
after rounding up to an internal buffer size) are read.
@param sizehint: desired maximum number of bytes to read.
@type sizehint: int
@return: list of lines read from the file.
@rtype: list
"""
lines = []
bytes = 0
while 1:
line = self.readline()
if len(line) == 0:
break
lines.append(line)
bytes += len(line)
if (sizehint is not None) and (bytes >= sizehint):
break
return lines
def seek(self, offset, whence=0):
"""
Set the file's current position, like stdio's C{fseek}. Not all file
objects support seeking.
@note: If a file is opened in append mode (C{'a'} or C{'a+'}), any seek
operations will be undone at the next write (as the file position will
move back to the end of the file).
@param offset: position to move to within the file, relative to
C{whence}.
@type offset: int
@param whence: type of movement: 0 = absolute; 1 = relative to the
current position; 2 = relative to the end of the file.
@type whence: int
@raise IOError: if the file doesn't support random access.
"""
raise IOError('File does not support seeking.')
def tell(self):
"""
Return the file's current position. This may not be accurate or
useful if the underlying file doesn't support random access, or was
opened in append mode.
@return: file position (in bytes).
@rtype: int
"""
return self._pos
def write(self, data):
"""
Write data to the file. If write buffering is on (C{bufsize} was
specified and non-zero), some or all of the data may not actually be
written yet. (Use L{flush} or L{close} to force buffered data to be
written out.)
@param data: data to write.
@type data: string
"""
if self._closed:
raise IOError('File is closed')
if not (self._flags & _FLAG_WRITE):
raise IOError('File not open for writing')
if not (self._flags & _FLAG_BUFFERED):
self._write_all(data)
return
self._wbuffer += data
if self._flags & _FLAG_LINE_BUFFERED:
last_newline_pos = self._wbuffer.rfind('\n')
if last_newline_pos >= 0:
self._write_all(self._wbuffer[:last_newline_pos + 1])
self._wbuffer = self._wbuffer[last_newline_pos+1:]
else:
if len(self._wbuffer) >= self._bufsize:
self._write_all(self._wbuffer)
self._wbuffer = ''
return
def writelines(self, sequence):
"""
Write a sequence of strings to the file. The sequence can be any
iterable object producing strings, typically a list of strings. (The
name is intended to match L{readlines}; C{writelines} does not add line
separators.)
@param sequence: an iterable sequence of strings.
@type sequence: sequence
"""
for line in sequence:
self.write(line)
return
def xreadlines(self):
"""
Identical to C{iter(f)}. This is a deprecated file interface that
predates python iterator support.
@return: an iterator.
@rtype: iterator
"""
return self
### overrides...
def _read(self, size):
"""
I{(subclass override)}
Read data from the stream. Return C{None} or raise C{EOFError} to
indicate EOF.
"""
raise EOFError()
def _write(self, data):
"""
I{(subclass override)}
Write data into the stream.
"""
raise IOError('write not implemented')
def _get_size(self):
"""
I{(subclass override)}
Return the size of the file. This is called from within L{_set_mode}
if the file is opened in append mode, so the file position can be
tracked and L{seek} and L{tell} will work correctly. If the file is
a stream that can't be randomly accessed, you don't need to override
this method.
"""
return 0
### internals...
def _set_mode(self, mode='r', bufsize=-1):
"""
Subclasses call this method to initialize the BufferedFile.
"""
if bufsize == 1:
# apparently, line buffering only affects writes. reads are only
# buffered if you call readline (directly or indirectly: iterating
# over a file will indirectly call readline).
self._flags |= _FLAG_BUFFERED | _FLAG_LINE_BUFFERED
elif bufsize > 1:
self._bufsize = bufsize
self._flags |= _FLAG_BUFFERED
if ('r' in mode) or ('+' in mode):
self._flags |= _FLAG_READ
if ('w' in mode) or ('+' in mode):
self._flags |= _FLAG_WRITE
if ('a' in mode):
self._flags |= _FLAG_WRITE | _FLAG_APPEND
self._size = self._get_size()
self._pos = self._realpos = self._size
if ('b' in mode):
self._flags |= _FLAG_BINARY
if ('U' in mode):
self._flags |= _FLAG_UNIVERSAL_NEWLINE
# built-in file objects have this attribute to store which kinds of
# line terminations they've seen:
# <http://www.python.org/doc/current/lib/built-in-funcs.html>
self.newlines = None
def _write_all(self, data):
# the underlying stream may be something that does partial writes (like
# a socket).
total = len(data)
while data:
count = self._write(data)
data = data[count:]
if self._flags & _FLAG_APPEND:
self._size += total
self._pos = self._realpos = self._size
else:
self._pos += total
self._realpos += total
return None
def _record_newline(self, newline):
# silliness about tracking what kinds of newlines we've seen.
# i don't understand why it can be None, a string, or a tuple, instead
# of just always being a tuple, but we'll emulate that behavior anyway.
if not (self._flags & _FLAG_UNIVERSAL_NEWLINE):
return
if self.newlines is None:
self.newlines = newline
elif (type(self.newlines) is str) and (self.newlines != newline):
self.newlines = (self.newlines, newline)
elif newline not in self.newlines:
self.newlines += (newline,)

paramiko/message.py

@@ -207,6 +207,16 @@ class Message (object):
self.packet = self.packet + struct.pack('>I', n)
return self
def add_int64(self, n):
"""
Add a 64-bit int to the stream.
@param n: long int to add.
@type n: long
"""
self.packet = self.packet + struct.pack('>Q', n)
return self
def add_mpint(self, z):
"this only works on positive numbers"
self.add_string(deflate_long(z))

paramiko/sftp.py  (new file, 358 lines)

@@ -0,0 +1,358 @@
#!/usr/bin/python
# Copyright (C) 2003-2004 Robey Pointer <robey@lag.net>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import struct, logging, socket
from util import format_binary, tb_strings
from channel import Channel
from message import Message
from logging import DEBUG, INFO, WARNING, ERROR, CRITICAL
from file import BufferedFile
CMD_INIT, CMD_VERSION, CMD_OPEN, CMD_CLOSE, CMD_READ, CMD_WRITE, CMD_LSTAT, CMD_FSTAT, CMD_SETSTAT, \
CMD_FSETSTAT, CMD_OPENDIR, CMD_READDIR, CMD_REMOVE, CMD_MKDIR, CMD_RMDIR, CMD_REALPATH, \
CMD_STAT, CMD_RENAME, CMD_READLINK, CMD_SYMLINK = range(1, 21)
CMD_STATUS, CMD_HANDLE, CMD_DATA, CMD_NAME, CMD_ATTRS = range(101, 106)
CMD_EXTENDED, CMD_EXTENDED_REPLY = range(200, 202)
FX_OK = 0
FX_EOF, FX_NO_SUCH_FILE, FX_PERMISSION_DENIED, FX_FAILURE, FX_BAD_MESSAGE, FX_NO_CONNECTION, \
FX_CONNECTION_LOST, FX_OP_UNSUPPORTED = range(1, 9)
VERSION = 3
class SFTPAttributes (object):
FLAG_SIZE = 1
FLAG_UIDGID = 2
FLAG_PERMISSIONS = 4
FLAG_AMTIME = 8
FLAG_EXTENDED = 0x80000000L
def __init__(self, msg=None):
self.flags = 0
self.attr = {}
if msg is not None:
self.unpack(msg)
def unpack(self, msg):
self.flags = msg.get_int()
if self.flags & self.FLAG_SIZE:
self.size = msg.get_int64()
if self.flags & self.FLAG_UIDGID:
self.uid = msg.get_int()
self.gid = msg.get_int()
if self.flags & self.FLAG_PERMISSIONS:
self.permissions = msg.get_int()
if self.flags & self.FLAG_AMTIME:
self.atime = msg.get_int()
self.mtime = msg.get_int()
if self.flags & self.FLAG_EXTENDED:
count = msg.get_int()
for i in range(count):
self.attr[msg.get_string()] = msg.get_string()
return msg.get_remainder()
def pack(self, msg):
self.flags = 0
if hasattr(self, 'size'):
self.flags |= self.FLAG_SIZE
if hasattr(self, 'uid') or hasattr(self, 'gid'):
self.flags |= self.FLAG_UIDGID
if hasattr(self, 'permissions'):
self.flags |= self.FLAG_PERMISSIONS
if hasattr(self, 'atime') or hasattr(self, 'mtime'):
self.flags |= self.FLAG_AMTIME
if len(self.attr) > 0:
self.flags |= self.FLAG_EXTENDED
msg.add_int(self.flags)
if self.flags & self.FLAG_SIZE:
msg.add_int64(self.size)
if self.flags & self.FLAG_UIDGID:
msg.add_int(getattr(self, 'uid', 0))
msg.add_int(getattr(self, 'gid', 0))
if self.flags & self.FLAG_PERMISSIONS:
msg.add_int(self.permissions)
if self.flags & self.FLAG_AMTIME:
msg.add_int(getattr(self, 'atime', 0))
msg.add_int(getattr(self, 'mtime', 0))
if self.flags & self.FLAG_EXTENDED:
msg.add_int(len(self.attr))
for key, val in self.attr.items():
msg.add_string(key)
msg.add_string(val)
return
class SFTPError (Exception):
pass
class SFTPFile (BufferedFile):
def __init__(self, sftp, handle, mode='r', bufsize=-1):
BufferedFile.__init__(self)
self.sftp = sftp
self.handle = handle
BufferedFile._set_mode(self, mode, bufsize)
def _get_size(self):
t, msg = self.sftp._request(CMD_FSTAT, self.handle)
if t != CMD_ATTRS:
raise SFTPError('Expected attrs')
attr = SFTPAttributes()
attr.unpack(msg)
try:
return attr.size
except:
return 0
def close(self):
BufferedFile.close(self)
self.sftp._request(CMD_CLOSE, self.handle)
def _read(self, size):
t, msg = self.sftp._request(CMD_READ, self.handle, long(self._realpos), int(size))
if t != CMD_DATA:
raise SFTPError('Expected data')
return msg.get_string()
def _write(self, data):
t, msg = self.sftp._request(CMD_WRITE, self.handle, long(self._realpos), str(data))
return len(data)
def seek(self, offset, whence=0):
if whence == self.SEEK_SET:
self._realpos = self._pos = offset
elif whence == self.SEEK_CUR:
self._realpos += offset
self._pos += offset
else:
self._realpos = self._pos = self._get_size() + offset
self._rbuffer = self._wbuffer = ''
class SFTP (object):
def __init__(self, sock):
self.sock = sock
self.ultra_debug = 1
self.request_number = 1
if type(sock) is Channel:
self.logger = logging.getLogger('paramiko.chan.' + sock.get_name() + '.sftp')
else:
self.logger = logging.getLogger('paramiko.sftp')
# protocol: (maybe should move to a different method)
self._send_packet(CMD_INIT, struct.pack('>I', VERSION))
t, data = self._read_packet()
if t != CMD_VERSION:
raise SFTPError('Incompatible sftp protocol')
version = struct.unpack('>I', data[:4])[0]
if version != VERSION:
raise SFTPError('Incompatible sftp protocol')
def from_transport(selfclass, t):
chan = t.open_session()
if chan is None:
return None
chan.invoke_subsystem('sftp')
return selfclass(chan)
from_transport = classmethod(from_transport)
def listdir(self, path):
t, msg = self._request(CMD_OPENDIR, path)
if t != CMD_HANDLE:
raise SFTPError('Expected handle')
handle = msg.get_string()
filelist = []
while 1:
try:
t, msg = self._request(CMD_READDIR, handle)
except EOFError, e:
# done with handle
break
if t != CMD_NAME:
raise SFTPError('Expected name response')
count = msg.get_int()
for i in range(count):
filename = msg.get_string()
longname = msg.get_string()
attr = SFTPAttributes(msg)
if (filename != '.') and (filename != '..'):
filelist.append(filename)
# currently we ignore the rest
self._request(CMD_CLOSE, handle)
return filelist
def open(self, filename, mode='r', bufsize=-1):
imode = 0
if ('r' in mode) or ('+' in mode):
imode |= self._FXF_READ
if ('w' in mode) or ('+' in mode):
imode |= self._FXF_WRITE
if ('w' in mode):
imode |= self._FXF_CREATE | self._FXF_TRUNC
if ('a' in mode):
imode |= self._FXF_APPEND
attrblock = SFTPAttributes()
t, msg = self._request(CMD_OPEN, filename, imode, attrblock)
if t != CMD_HANDLE:
raise SFTPError('Expected handle')
handle = msg.get_string()
return SFTPFile(self, handle, mode, bufsize)
def remove(self, path):
"""
Remove the file at the given path.
@param path: path (absolute or relative) of the file to remove.
@type path: string
@raise IOError: if the path refers to a folder (directory). Use
L{rmdir} to remove a folder.
"""
self._request(CMD_REMOVE, path)
unlink = remove
def rename(self, oldpath, newpath):
"""
Rename a file or folder from C{oldpath} to C{newpath}.
@param oldpath: existing name of the file or folder.
@type oldpath: string
@param newpath: new name for the file or folder.
@type newpath: string
@raise IOError: if C{newpath} is a folder, or something else goes
wrong.
"""
self._request(CMD_RENAME, oldpath, newpath)
def mkdir(self, path, mode=0777):
"""
Create a folder (directory) named C{path} with numeric mode C{mode}.
The default mode is 0777 (octal). On some systems, mode is ignored.
Where it is used, the current umask value is first masked out.
@param path: name of the folder to create.
@type path: string
@param mode: permissions (posix-style) for the newly-created folder.
@type mode: int
"""
attr = SFTPAttributes()
attr.permissions = mode
self._request(CMD_MKDIR, path, attr)
### internals...
_FXF_READ = 0x1
_FXF_WRITE = 0x2
_FXF_APPEND = 0x4
_FXF_CREATE = 0x8
_FXF_TRUNC = 0x10
_FXF_EXCL = 0x20
def _log(self, level, msg):
if type(msg) == type([]):
for m in msg:
self.logger.log(level, m)
else:
self.logger.log(level, msg)
def _write_all(self, out):
while len(out) > 0:
n = self.sock.send(out)
if n <= 0:
raise EOFError()
if n == len(out):
return
out = out[n:]
return
def _read_all(self, n):
out = ''
while n > 0:
try:
x = self.sock.recv(n)
if len(x) == 0:
raise EOFError()
out += x
n -= len(x)
except socket.timeout:
if not self.active:
raise EOFError()
return out
def _send_packet(self, t, packet):
out = struct.pack('>I', len(packet) + 1) + chr(t) + packet
if self.ultra_debug:
self._log(DEBUG, format_binary(out, 'OUT: '))
self._write_all(out)
def _read_packet(self):
size = struct.unpack('>I', self._read_all(4))[0]
data = self._read_all(size)
if self.ultra_debug:
self._log(DEBUG, format_binary(data, 'IN: '))
if size > 0:
return ord(data[0]), data[1:]
return 0, ''
def _request(self, t, *arg):
msg = Message()
msg.add_int(self.request_number)
for item in arg:
if type(item) is int:
msg.add_int(item)
elif type(item) is long:
msg.add_int64(item)
elif type(item) is str:
msg.add_string(item)
elif type(item) is SFTPAttributes:
item.pack(msg)
else:
raise Exception('unknown type for ' + repr(item) + ' type ' + repr(type(item)))
self._send_packet(t, str(msg))
t, data = self._read_packet()
msg = Message(data)
num = msg.get_int()
if num != self.request_number:
raise SFTPError('Expected response #%d, got response #%d' % (self.request_number, num))
self.request_number += 1
if t == CMD_STATUS:
self._convert_status(msg)
return t, msg
def _convert_status(self, msg):
"""
Raises EOFError or IOError on error status; otherwise does nothing.
"""
code = msg.get_int()
text = msg.get_string()
if code == FX_OK:
return
elif code == FX_EOF:
raise EOFError(text)
else:
raise IOError(text)
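Since sftp.py also defines a from_transport classmethod, the manual open_session / invoke_subsystem steps in demo_sftp.py could presumably be shortened to something like the sketch below. This is written against the API added in this commit, not code taken from it:

# after the Transport 't' has been authenticated, as in demo_sftp.py:
sftp = paramiko.SFTP.from_transport(t)
if sftp is None:
    print '*** Unable to open an sftp session.'
else:
    print repr(sftp.listdir('/tmp'))
    sftp.sock.close()    # one way to close the underlying channel when finished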

paramiko/util.py

@@ -92,7 +92,7 @@ def format_binary(data, prefix=''):
def format_binary_line(data):
left = ' '.join(['%02X' % ord(c) for c in data])
right = ''.join([('.%c..' % c)[(ord(c)+61)//94] for c in data])
right = ''.join([('.%c..' % c)[(ord(c)+63)//95] for c in data])
return '%-50s %s' % (left, right)
def hexify(s):
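On the format_binary_line change: the selector string ('.%c..' % c) prints the character itself only when the computed index is 1 and a '.' otherwise. With the old divisor, a space (ord 32) gave (32+61)//94 == 0 and was quoted as '.'; the new formula gives (32+63)//95 == 1, so spaces print literally while control characters and bytes from 0x7f up still map to '.'. A quick sanity check of the arithmetic, not part of the commit:

# verify the index arithmetic behind the format_binary_line fix (Python 2, like the rest of the tree)
for c in (' ', 'A', '~', '\x00', '\x7f'):
    old = (ord(c) + 61) // 94
    new = (ord(c) + 63) // 95
    print repr(c), old, new
# space: old 0 (quoted), new 1 (shown); 'A' and '~': 1 under both; '\x00': 0 under both; '\x7f': 2 under both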