 # This module is part of GitDB and is released under
 # the New BSD License: http://www.opensource.org/licenses/bsd-license.php
 """Contains PackIndexFile and PackFile implementations"""
+import array
+from binascii import crc32
+import os
+from struct import pack
+import sys
+import tempfile
 import zlib

+from gitdb.base import (
+    OInfo,
+    OStream,
+    OPackInfo,
+    OPackStream,
+    ODeltaStream,
+    ODeltaPackInfo,
+    ODeltaPackStream,
+)
+from gitdb.const import NULL_BYTE
 from gitdb.exc import (
     BadObject,
     AmbiguousObjectName,
     UnsupportedOperation,
     ParseError
 )
-
-from gitdb.util import (
-    mman,
-    LazyMixin,
-    unpack_from,
-    bin_to_hex,
-    byte_ord,
-)
-
 from gitdb.fun import (
     create_pack_object_header,
     pack_object_header_info,
@@ -33,46 +40,34 @@
     REF_DELTA,
     msb_size
 )
-
-try:
-    from gitdb_speedups._perf import PackIndexFile_sha_to_index
-except ImportError:
-    pass
-# END try c module
-
-from gitdb.base import (
-    OInfo,
-    OStream,
-    OPackInfo,
-    OPackStream,
-    ODeltaStream,
-    ODeltaPackInfo,
-    ODeltaPackStream,
-)
-
 from gitdb.stream import (
     DecompressMemMapReader,
     DeltaApplyReader,
     Sha1Writer,
     NullStream,
     FlexibleSha1Writer
 )
-
-from struct import pack
-from binascii import crc32
-
-from gitdb.const import NULL_BYTE
+from gitdb.util import (
+    mman,
+    LazyMixin,
+    bin_to_hex,
+    byte_ord,
+)
 from gitdb.utils.compat import (
     izip,
     buffer,
     xrange,
-    to_bytes
+    to_bytes,
+    unpack_from,
 )

-import tempfile
-import array
-import os
-import sys
+
+try:
+    from gitdb_speedups._perf import PackIndexFile_sha_to_index
+except ImportError:
+    pass
+# END try c module
+

 __all__ = ('PackIndexFile', 'PackFile', 'PackEntity')

@@ -290,8 +285,9 @@ def _make_cursor(self):

         # We will assume that the index will always fully fit into memory !
         if mman.window_size() > 0 and cursor.file_size() > mman.window_size():
-            raise AssertionError("The index file at %s is too large to fit into a mapped window (%i > %i). This is a limitation of the implementation" % (
-                self._indexpath, cursor.file_size(), mman.window_size()))
+            raise AssertionError("The index file at %s is too large to fit into a mapped window (%i > %i). "
+                                 "This is a limitation of the hardware." % (
+                                     self._indexpath, cursor.file_size(), mman.window_size()))

         return cursor

@@ -528,7 +524,7 @@ class PackFile(LazyMixin):
         '_size',
         '_version',
         '_entered',
-        )
+    )

     pack_signature = 0x5041434b  # 'PACK'
     pack_version_default = 2

@@ -603,7 +599,11 @@ def data(self):
         """
         :return: read-only data of this pack. It provides random access and usually
             is a memory map.
-        :note: This method is unsafe as it returns a window into a file which might be larger than than the actual window size"""
+
+        .. note::
+            This method is unsafe as it returns a window into a file which might be larger
+            than than the actual window size
+        """
         # can use map as we are starting at offset 0. Otherwise we would have to use buffer()
         return self._cursor.use_region().map()

@@ -761,7 +761,8 @@ def _object(self, sha, as_stream, index=-1):
             sha = self._index.sha(index)
         # END assure sha is present ( in output )
         offset = self._index.offset(index)
-        type_id, uncomp_size, data_rela_offset = pack_object_header_info(self._pack._cursor.use_region(offset).buffer())
+        type_id, uncomp_size, _ = pack_object_header_info(
+            self._pack._cursor.use_region(offset).buffer())
         if as_stream:
             if type_id not in delta_types:
                 packstream = self._pack.stream(offset)
@@ -784,7 +785,7 @@ def _object(self, sha, as_stream, index=-1):
                 # the actual target size, as opposed to the size of the delta data
                 streams = self.collect_streams_at_offset(offset)
                 buf = streams[0].read(512)
-                offset, src_size = msb_size(buf)
+                offset, src_size = msb_size(buf)  # @UnusedVariable
                 offset, target_size = msb_size(buf, offset)

                 # collect the streams to obtain the actual object type
@@ -1019,8 +1020,9 @@ def write_pack(cls, object_iter, pack_write, index_write=None,
         # END for each object

         if actual_count != object_count:
-            raise ValueError(
-                "Expected to write %i objects into pack, but received only %i from iterators" % (object_count, actual_count))
+            raise ValueError("Expected to write %i objects into pack, "
+                             "but received only %i from iterators" %
+                             (object_count, actual_count))
         # END count assertion

         # write footer
@@ -1049,7 +1051,8 @@ def create(cls, object_iter, base_dir, object_count=None, zlib_compression=zlib.
         pack_write = lambda d: os.write(pack_fd, d)
         index_write = lambda d: os.write(index_fd, d)

-        pack_binsha, index_binsha = cls.write_pack(object_iter, pack_write, index_write, object_count, zlib_compression)
+        pack_binsha, _ = cls.write_pack(
+            object_iter, pack_write, index_write, object_count, zlib_compression)
         os.close(pack_fd)
         os.close(index_fd)
