Commit 1857914: Add type hints
Parent: 6075461

3 files changed (+55 -34 lines)

setup.py (+2)

@@ -63,6 +63,7 @@
         'Operating System :: MacOS :: MacOS X',
         # 'Operating System :: Microsoft :: Windows', -- Not tested yet
         'Operating System :: POSIX',
+        'Typing :: Typed',
         'Programming Language :: Python :: 3',
         'Programming Language :: Python :: 3.8',
         'Programming Language :: Python :: 3.9',
@@ -74,4 +75,5 @@
     install_requires=install_requires,
     setup_requires=setup_requires,
     package_dir={'': 'src'},
+    package_data={'snappy': ['py.typed']}
 )

src/snappy/py.typed

Whitespace-only changes.
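The empty py.typed file is the PEP 561 marker that tells type checkers this package ships inline type information; the package_data entry in setup.py ensures it is installed next to the code, and the 'Typing :: Typed' classifier advertises it on PyPI. A minimal sketch of the effect for a downstream project (assumes python-snappy and mypy are installed; the file name is illustrative):

# check_types.py -- run with `mypy check_types.py`.
# With the py.typed marker installed, mypy reads snappy's own
# annotations instead of falling back to Any for the whole module.
import snappy

payload: bytes = snappy.compress(b"some data")  # OK: compress returns bytes
snappy.compress(12345)  # flagged by mypy: expected Union[str, bytes] (also fails at runtime)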

src/snappy/snappy.py (+53 -34)
@@ -39,9 +39,12 @@
 assert "some data" == snappy.uncompress(compressed)

 """
-from __future__ import absolute_import
+from __future__ import absolute_import, annotations

 import struct
+from typing import (
+    Optional, Union, IO, BinaryIO, Protocol, Type, overload, Any,
+)

 import cramjam

@@ -59,7 +62,7 @@ class UncompressError(Exception):
     pass


-def isValidCompressed(data):
+def isValidCompressed(data: Union[str, bytes]) -> bool:
     if isinstance(data, str):
         data = data.encode('utf-8')

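The new signature documents that isValidCompressed accepts either str (encoded as UTF-8 first) or bytes and always returns a bool. A quick sketch of both outcomes (not from the commit):

import snappy

assert snappy.isValidCompressed(snappy.compress(b"some data"))  # real snappy data -> True
assert not snappy.isValidCompressed(b"definitely not snappy")   # malformed input -> False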
@@ -71,12 +74,18 @@ def isValidCompressed(data):
     return ok


-def compress(data, encoding='utf-8'):
+def compress(data: Union[str, bytes], encoding: str = 'utf-8') -> bytes:
     if isinstance(data, str):
         data = data.encode(encoding)

     return bytes(_compress(data))

+@overload
+def uncompress(data: bytes) -> bytes: ...
+
+@overload
+def uncompress(data: bytes, decoding: Optional[str] = None) -> Union[str, bytes]: ...
+
 def uncompress(data, decoding=None):
     if isinstance(data, str):
         raise UncompressError("It's only possible to uncompress bytes")
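The two overloads let a checker narrow the return type at the call site: with no decoding argument the result is bytes, with one it is Union[str, bytes], while the untyped implementation beneath them keeps the runtime behaviour unchanged. A sketch of what mypy would infer (hypothetical snippet, not part of the commit):

import snappy

compressed = snappy.compress("some data")
raw = snappy.uncompress(compressed)            # first overload applies
text = snappy.uncompress(compressed, "utf-8")  # second overload applies
reveal_type(raw)   # mypy: builtins.bytes (reveal_type is mypy-only; remove before running)
reveal_type(text)  # mypy: Union[builtins.str, builtins.bytes]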
@@ -91,6 +100,16 @@ def uncompress(data, decoding=None):

 decompress = uncompress

+
+class Compressor(Protocol):
+    def add_chunk(self, data) -> Any: ...
+
+
+class Decompressor(Protocol):
+    def decompress(self, data) -> Any: ...
+    def flush(self): ...
+
+
 class StreamCompressor():

     """This class implements the compressor-side of the proposed Snappy framing
@@ -111,7 +130,7 @@ class StreamCompressor():
     def __init__(self):
         self.c = cramjam.snappy.Compressor()

-    def add_chunk(self, data: bytes, compress=None):
+    def add_chunk(self, data: bytes, compress=None) -> bytes:
         """Add a chunk, returning a string that is framed and compressed.

         Outputs a single snappy chunk; if it is the very start of the stream,
@@ -122,10 +141,10 @@ def add_chunk(self, data: bytes, compress=None):

     compress = add_chunk

-    def flush(self):
+    def flush(self) -> bytes:
         return bytes(self.c.flush())

-    def copy(self):
+    def copy(self) -> 'StreamCompressor':
         """This method exists for compatibility with the zlib compressobj.
         """
         return self
@@ -159,7 +178,7 @@ def check_format(fin):
         except:
             return False

-    def decompress(self, data: bytes):
+    def decompress(self, data: bytes) -> bytes:
         """Decompress 'data', returning a string containing the uncompressed
         data corresponding to at least part of the data in string. This data
         should be concatenated to the output produced by any preceding calls to
@@ -191,15 +210,15 @@ def decompress(self, data: bytes):
         self.c.decompress(data)
         return self.flush()

-    def flush(self):
+    def flush(self) -> bytes:
         return bytes(self.c.flush())

-    def copy(self):
+    def copy(self) -> 'StreamDecompressor':
         return self


 class HadoopStreamCompressor():
-    def add_chunk(self, data: bytes, compress=None):
+    def add_chunk(self, data: bytes, compress=None) -> bytes:
         """Add a chunk, returning a string that is framed and compressed.

         Outputs a single snappy chunk; if it is the very start of the stream,
@@ -210,11 +229,11 @@ def add_chunk(self, data: bytes, compress=None):

     compress = add_chunk

-    def flush(self):
+    def flush(self) -> bytes:
         # never maintains a buffer
         return b""

-    def copy(self):
+    def copy(self) -> 'HadoopStreamCompressor':
         """This method exists for compatibility with the zlib compressobj.
         """
         return self
@@ -241,7 +260,7 @@ def check_format(fin):
         except:
             return False

-    def decompress(self, data: bytes):
+    def decompress(self, data: bytes) -> bytes:
         """Decompress 'data', returning a string containing the uncompressed
         data corresponding to at least part of the data in string. This data
         should be concatenated to the output produced by any preceding calls to
@@ -264,18 +283,18 @@ def decompress(self, data: bytes):
         data = data[8 + chunk_length:]
         return b"".join(out)

-    def flush(self):
+    def flush(self) -> bytes:
         return b""

-    def copy(self):
+    def copy(self) -> 'HadoopStreamDecompressor':
         return self



-def stream_compress(src,
-                    dst,
-                    blocksize=_STREAM_TO_STREAM_BLOCK_SIZE,
-                    compressor_cls=StreamCompressor):
+def stream_compress(src: IO,
+                    dst: IO,
+                    blocksize: int = _STREAM_TO_STREAM_BLOCK_SIZE,
+                    compressor_cls: Type[Compressor] = StreamCompressor) -> None:
     """Takes an incoming file-like object and an outgoing file-like object,
     reads data from src, compresses it, and writes it to dst. 'src' should
     support the read method, and 'dst' should support the write method.
@@ -290,11 +309,11 @@ def stream_compress(src,
         if buf: dst.write(buf)


-def stream_decompress(src,
-                      dst,
-                      blocksize=_STREAM_TO_STREAM_BLOCK_SIZE,
-                      decompressor_cls=StreamDecompressor,
-                      start_chunk=None):
+def stream_decompress(src: IO,
+                      dst: IO,
+                      blocksize: int = _STREAM_TO_STREAM_BLOCK_SIZE,
+                      decompressor_cls: Type[Decompressor] = StreamDecompressor,
+                      start_chunk=None) -> None:
     """Takes an incoming file-like object and an outgoing file-like object,
     reads data from src, decompresses it, and writes it to dst. 'src' should
     support the read method, and 'dst' should support the write method.
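Since src and dst are only annotated as IO, any file-like pair works; a round-trip through in-memory buffers illustrates the intended use (a sketch, not from the commit):

import io
import snappy

src = io.BytesIO(b"some data" * 1000)
framed = io.BytesIO()
snappy.stream_compress(src, framed)        # writes the snappy framing format

framed.seek(0)
restored = io.BytesIO()
snappy.stream_decompress(framed, restored)
assert restored.getvalue() == b"some data" * 1000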
@@ -319,10 +338,10 @@ def stream_decompress(src,


 def hadoop_stream_decompress(
-    src,
-    dst,
-    blocksize=_STREAM_TO_STREAM_BLOCK_SIZE,
-):
+    src: BinaryIO,
+    dst: BinaryIO,
+    blocksize: int = _STREAM_TO_STREAM_BLOCK_SIZE,
+) -> None:
     c = HadoopStreamDecompressor()
     while True:
         data = src.read(blocksize)
@@ -335,10 +354,10 @@ def hadoop_stream_decompress(


 def hadoop_stream_compress(
-    src,
-    dst,
-    blocksize=_STREAM_TO_STREAM_BLOCK_SIZE,
-):
+    src: BinaryIO,
+    dst: BinaryIO,
+    blocksize: int = _STREAM_TO_STREAM_BLOCK_SIZE,
+) -> None:
     c = HadoopStreamCompressor()
     while True:
         data = src.read(blocksize)
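The Hadoop helpers get the stricter BinaryIO annotation because the Hadoop framing is byte-oriented on both sides. The same in-memory round-trip pattern applies (sketch; imported from the module shown in this diff rather than assuming package-level re-exports):

import io
from snappy.snappy import hadoop_stream_compress, hadoop_stream_decompress

src = io.BytesIO(b"log line\n" * 100)
framed = io.BytesIO()
hadoop_stream_compress(src, framed)        # Hadoop-framed snappy blocks

framed.seek(0)
out = io.BytesIO()
hadoop_stream_decompress(framed, out)
assert out.getvalue() == b"log line\n" * 100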
@@ -350,11 +369,11 @@ def hadoop_stream_compress(
         dst.flush()


-def raw_stream_decompress(src, dst):
+def raw_stream_decompress(src: BinaryIO, dst: BinaryIO) -> None:
     data = src.read()
     dst.write(decompress(data))


-def raw_stream_compress(src, dst):
+def raw_stream_compress(src: BinaryIO, dst: BinaryIO) -> None:
     data = src.read()
     dst.write(compress(data))
