
Commit 4524faf

Fixed up docs for upcoming release. Bumped version to 0.5.3
1 parent 40762f9 commit 4524faf

12 files changed: +69, -44 lines


Makefile

Lines changed: 1 addition & 1 deletion
@@ -23,5 +23,5 @@ clean::
 rm -f *.so

 coverage:: build
-PYTHONPATH=. $(PYTHON) $(TESTRUNNER) --cover-package=dulwich --with-coverage --cover-erase --cover-inclusive gitdb
+PYTHONPATH=. $(PYTHON) $(TESTRUNNER) --cover-package=gitdb --with-coverage --cover-erase --cover-inclusive gitdb


gitdb/__init__.py

Lines changed: 7 additions & 0 deletions
@@ -24,6 +24,13 @@ def _init_externals():

 _init_externals()

+__author__ = "Sebastian Thiel"
+__contact__ = "[email protected]"
+__homepage__ = "https://github.com/gitpython-developers/gitdb"
+version_info = (0, 5, 3)
+__version__ = '.'.join(str(i) for i in version_info)
+
+
 # default imports
 from db import *
 from base import *
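
With the version now defined in one place, other code can simply import it rather than repeating the string; the setup.py change later in this commit does exactly that. A small illustration of reading the new attributes (expected output shown as comments):

import gitdb

print(gitdb.version_info)   # (0, 5, 3)
print(gitdb.__version__)    # '0.5.3'
print(gitdb.__author__)     # 'Sebastian Thiel'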

gitdb/db/base.py

Lines changed: 4 additions & 3 deletions
@@ -72,7 +72,8 @@ def stream_async(self, reader):
 :param reader: see ``info``
 :param max_threads: see ``ObjectDBW.store``
 :return: async.Reader yielding OStream|InvalidOStream instances in any order
-:note: depending on the system configuration, it might not be possible to
+
+**Note:** depending on the system configuration, it might not be possible to
 read all OStreams at once. Instead, read them individually using reader.read(x)
 where x is small enough."""
 # base implementation just uses the stream method repeatedly
@@ -140,7 +141,7 @@ def store_async(self, reader):
 The same instances will be used in the output channel as were received
 in by the Reader.

-:note:As some ODB implementations implement this operation atomic, they might
+**Note:** As some ODB implementations implement this operation atomic, they might
 abort the whole operation if one item could not be processed. Hence check how
 many items have actually been produced."""
 # base implementation uses store to perform the work
@@ -158,7 +159,7 @@ def __init__(self, root_path):
 """Initialize this instance to look for its files at the given root path
 All subsequent operations will be relative to this path
 :raise InvalidDBRoot:
-:note: The base will not perform any accessablity checking as the base
+**Note:** The base will not perform any accessablity checking as the base
 might not yet be accessible, but become accessible before the first
 access."""
 super(FileDBBase, self).__init__()

gitdb/db/mem.py

Lines changed: 2 additions & 2 deletions
@@ -33,7 +33,7 @@ class MemoryDB(ObjectDBR, ObjectDBW):
 it to the actual physical storage, as it allows to query whether object already
 exists in the target storage before introducing actual IO

-:note: memory is currently not threadsafe, hence the async methods cannot be used
+**Note:** memory is currently not threadsafe, hence the async methods cannot be used
 for storing"""

 def __init__(self):
@@ -92,7 +92,7 @@ def sha_iter(self):
 def stream_copy(self, sha_iter, odb):
 """Copy the streams as identified by sha's yielded by sha_iter into the given odb
 The streams will be copied directly
-:note: the object will only be written if it did not exist in the target db
+**Note:** the object will only be written if it did not exist in the target db
 :return: amount of streams actually copied into odb. If smaller than the amount
 of input shas, one or more objects did already exist in odb"""
 count = 0
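
The stream_copy docstring above describes copying objects between databases by sha. A rough usage sketch, assuming the source MemoryDB already holds some objects (any write-enabled object database could serve as the target; a second MemoryDB is used purely for illustration):

from gitdb.db.mem import MemoryDB

source = MemoryDB()   # assume objects were stored here earlier
target = MemoryDB()   # any ObjectDBW-compatible database would do

# copy everything the source knows about; objects already present in the
# target are skipped, and only the number of newly written streams is returned
copied = source.stream_copy(source.sha_iter(), target)
print("%d new objects written" % copied)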

gitdb/db/pack.py

Lines changed: 1 addition & 1 deletion
@@ -58,7 +58,7 @@ def _pack_info(self, sha):
 """:return: tuple(entity, index) for an item at the given sha
 :param sha: 20 or 40 byte sha
 :raise BadObject:
-:note: This method is not thread-safe, but may be hit in multi-threaded
+**Note:** This method is not thread-safe, but may be hit in multi-threaded
 operation. The worst thing that can happen though is a counter that
 was not incremented, or the list being in wrong order. So we safe
 the time for locking here, lets see how that goes"""

gitdb/ext/smmap

Submodule smmap updated from 84eedc5 to f097bd6

gitdb/fun.py

Lines changed: 9 additions & 6 deletions
@@ -138,7 +138,7 @@ def _closest_index(dcl, absofs):
 """:return: index at which the given absofs should be inserted. The index points
 to the DeltaChunk with a target buffer absofs that equals or is greater than
 absofs.
-:note: global method for performance only, it belongs to DeltaChunkList"""
+**Note:** global method for performance only, it belongs to DeltaChunkList"""
 lo = 0
 hi = len(dcl)
 while lo < hi:
@@ -414,9 +414,11 @@ def pack_object_header_info(data):
 return (type_id, size, i)

 def create_pack_object_header(obj_type, obj_size):
-""":return: string defining the pack header comprised of the object type
-and its incompressed size in bytes
-:parmam obj_type: pack type_id of the object
+"""
+:return: string defining the pack header comprised of the object type
+and its incompressed size in bytes
+
+:param obj_type: pack type_id of the object
 :param obj_size: uncompressed size in bytes of the following object stream"""
 c = 0 # 1 byte
 hdr = str() # output string
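
For context, the header this function builds follows git's variable-length encoding: the object type sits in bits 4-6 of the first byte, the uncompressed size is spread over the remaining bits, and bit 7 of every byte flags a continuation. A standalone sketch of that encoding (illustrative only, not gitdb's actual implementation):

def pack_object_header(type_id, size):
    """Encode a pack object header: type id in bits 4-6 of the first byte,
    the size in the low bits, bit 7 of each byte marking a continuation."""
    byte = (type_id << 4) | (size & 0x0F)
    size >>= 4
    out = []
    while size:
        out.append(chr(byte | 0x80))  # more size bits follow in the next byte
        byte = size & 0x7F
        size >>= 7
    out.append(chr(byte))
    return ''.join(out)

# a commit (type id 1) of 300 bytes needs two header bytes
print(repr(pack_object_header(1, 300)))  # '\x9c\x12'
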
@@ -483,7 +485,7 @@ def stream_copy(read, write, size, chunk_size):
 Copy a stream up to size bytes using the provided read and write methods,
 in chunks of chunk_size

-:note: its much like stream_copy utility, but operates just using methods"""
+**Note:** its much like stream_copy utility, but operates just using methods"""
 dbw = 0 # num data bytes written

 # WRITE ALL DATA UP TO SIZE
@@ -597,7 +599,8 @@ def apply_delta_data(src_buf, src_buf_size, delta_buf, delta_buf_size, write):
 :param delta_buf_size: size fo the delta buffer in bytes
 :param delta_buf: random access delta data
 :param write: write method taking a chunk of bytes
-:note: transcribed to python from the similar routine in patch-delta.c"""
+
+**Note:** transcribed to python from the similar routine in patch-delta.c"""
 i = 0
 db = delta_buf
 while i < delta_buf_size:

gitdb/pack.py

Lines changed: 17 additions & 12 deletions
@@ -173,7 +173,7 @@ def write_stream_to_pack(read, write, zstream, base_crc=None):
 class IndexWriter(object):
 """Utility to cache index information, allowing to write all information later
 in one go to the given stream
-:note: currently only writes v2 indices"""
+**Note:** currently only writes v2 indices"""
 __slots__ = '_objs'

 def __init__(self):
@@ -391,7 +391,8 @@ def indexfile_checksum(self):

 def offsets(self):
 """:return: sequence of all offsets in the order in which they were written
-:note: return value can be random accessed, but may be immmutable"""
+
+**Note:** return value can be random accessed, but may be immmutable"""
 if self._version == 2:
 # read stream to array, convert to tuple
 a = array.array('I') # 4 byte unsigned int, long are 8 byte on 64 bit it appears
@@ -497,10 +498,10 @@ class PackFile(LazyMixin):
 packs therefor is 32 bit on 32 bit systems. On 64 bit systems, this should be
 fine though.

-:note: at some point, this might be implemented using streams as well, or
-streams are an alternate path in the case memory maps cannot be created
-for some reason - one clearly doesn't want to read 10GB at once in that
-case"""
+**Note:** at some point, this might be implemented using streams as well, or
+streams are an alternate path in the case memory maps cannot be created
+for some reason - one clearly doesn't want to read 10GB at once in that
+case"""

 __slots__ = ('_packpath', '_cursor', '_size', '_version')
 pack_signature = 0x5041434b # 'PACK'
@@ -625,8 +626,9 @@ def stream_iter(self, start_offset=0):
 to access the data in the pack directly.
 :param start_offset: offset to the first object to iterate. If 0, iteration
 starts at the very first object in the pack.
-:note: Iterating a pack directly is costly as the datastream has to be decompressed
-to determine the bounds between the objects"""
+
+**Note:** Iterating a pack directly is costly as the datastream has to be decompressed
+to determine the bounds between the objects"""
 return self._iter_objects(start_offset, as_stream=True)

 #} END Read-Database like Interface
@@ -902,9 +904,11 @@ def write_pack(cls, object_iter, pack_write, index_write=None,
 :param zlib_compression: the zlib compression level to use
 :return: tuple(pack_sha, index_binsha) binary sha over all the contents of the pack
 and over all contents of the index. If index_write was None, index_binsha will be None
-:note: The destination of the write functions is up to the user. It could
-be a socket, or a file for instance
-:note: writes only undeltified objects"""
+
+**Note:** The destination of the write functions is up to the user. It could
+be a socket, or a file for instance
+
+**Note:** writes only undeltified objects"""
 objs = object_iter
 if not object_count:
 if not isinstance(object_iter, (tuple, list)):
@@ -979,7 +983,8 @@ def create(cls, object_iter, base_dir, object_count = None, zlib_compression = z
 and corresponding index file. The pack contains all OStream objects contained in object iter.
 :param base_dir: directory which is to contain the files
 :return: PackEntity instance initialized with the new pack
-:note: for more information on the other parameters see the write_pack method"""
+
+**Note:** for more information on the other parameters see the write_pack method"""
 pack_fd, pack_path = tempfile.mkstemp('', 'pack', base_dir)
 index_fd, index_path = tempfile.mkstemp('', 'index', base_dir)
 pack_write = lambda d: os.write(pack_fd, d)

gitdb/stream.py

Lines changed: 3 additions & 3 deletions
@@ -51,7 +51,7 @@ class DecompressMemMapReader(LazyMixin):
 To read efficiently, you clearly don't want to read individual bytes, instead,
 read a few kilobytes at least.

-:note: The chunk-size should be carefully selected as it will involve quite a bit
+**Note:** The chunk-size should be carefully selected as it will involve quite a bit
 of string copying due to the way the zlib is implemented. Its very wasteful,
 hence we try to find a good tradeoff between allocation time and number of
 times we actually allocate. An own zlib implementation would be good here
@@ -609,8 +609,8 @@ class FDCompressedSha1Writer(Sha1Writer):
 """Digests data written to it, making the sha available, then compress the
 data and write it to the file descriptor

-:note: operates on raw file descriptors
-:note: for this to work, you have to use the close-method of this instance"""
+**Note:** operates on raw file descriptors
+**Note:** for this to work, you have to use the close-method of this instance"""
 __slots__ = ("fd", "sha1", "zip")

 # default exception
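
As the note above stresses, DecompressMemMapReader is meant to be read a few kilobytes at a time rather than byte by byte. A minimal sketch of such a read loop (the reader, the write callable, and the 4 KB chunk size are illustrative):

def drain(reader, write, chunk_size=4096):
    """Read a file-like reader in moderate chunks and hand each chunk to write()."""
    while True:
        chunk = reader.read(chunk_size)
        if not chunk:
            break
        write(chunk)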

gitdb/test/db/lib.py

Lines changed: 1 addition & 1 deletion
@@ -65,7 +65,7 @@ def _assert_object_writing_simple(self, db):

 def _assert_object_writing(self, db):
 """General tests to verify object writing, compatible to ObjectDBW
-:note: requires write access to the database"""
+**Note:** requires write access to the database"""
 # start in 'dry-run' mode, using a simple sha1 writer
 ostreams = (ZippedStoreShaWriter, None)
 for ostreamcls in ostreams:

gitdb/util.py

Lines changed: 18 additions & 10 deletions
@@ -119,8 +119,9 @@ def __getslice__(self, start, end):
 #{ Routines

 def make_sha(source=''):
-"""A python2.4 workaround for the sha/hashlib module fiasco
-:note: From the dulwich project """
+"""A python2.4 workaround for the sha/hashlib module fiasco
+
+**Note** From the dulwich project """
 try:
 return hashlib.sha1(source)
 except NameError:
@@ -146,6 +147,7 @@ def allocate_memory(size):

 def file_contents_ro(fd, stream=False, allow_mmap=True):
 """:return: read-only contents of the file represented by the file descriptor fd
+
 :param fd: file descriptor opened for reading
 :param stream: if False, random access is provided, otherwise the stream interface
 is provided.
@@ -173,14 +175,16 @@ def file_contents_ro(fd, stream=False, allow_mmap=True):

 def file_contents_ro_filepath(filepath, stream=False, allow_mmap=True, flags=0):
 """Get the file contents at filepath as fast as possible
+
 :return: random access compatible memory of the given filepath
 :param stream: see ``file_contents_ro``
 :param allow_mmap: see ``file_contents_ro``
 :param flags: additional flags to pass to os.open
 :raise OSError: If the file could not be opened
-:note: for now we don't try to use O_NOATIME directly as the right value needs to be
-shared per database in fact. It only makes a real difference for loose object
-databases anyway, and they use it with the help of the ``flags`` parameter"""
+
+**Note** for now we don't try to use O_NOATIME directly as the right value needs to be
+shared per database in fact. It only makes a real difference for loose object
+databases anyway, and they use it with the help of the ``flags`` parameter"""
 fd = os.open(filepath, os.O_RDONLY|getattr(os, 'O_BINARY', 0)|flags)
 try:
 return file_contents_ro(fd, stream, allow_mmap)
@@ -189,7 +193,8 @@ def file_contents_ro_filepath(filepath, stream=False, allow_mmap=True, flags=0):
 # END assure file is closed

 def sliding_ro_buffer(filepath, flags=0):
-""":return: a buffer compatible object which uses our mapped memory manager internally
+"""
+:return: a buffer compatible object which uses our mapped memory manager internally
 ready to read the whole given filepath"""
 return SlidingWindowMapBuffer(mman.make_cursor(filepath), flags=flags)

@@ -254,7 +259,7 @@ class LockedFD(object):
 This type handles error correctly in that it will assure a consistent state
 on destruction.

-:note: with this setup, parallel reading is not possible"""
+**note** with this setup, parallel reading is not possible"""
 __slots__ = ("_filepath", '_fd', '_write')

 def __init__(self, filepath):
@@ -283,7 +288,8 @@ def open(self, write=False, stream=False):
 and must not be closed directly
 :raise IOError: if the lock could not be retrieved
 :raise OSError: If the actual file could not be opened for reading
-:note: must only be called once"""
+
+**note** must only be called once"""
 if self._write is not None:
 raise AssertionError("Called %s multiple times" % self.open)

@@ -327,13 +333,15 @@ def commit(self):
 """When done writing, call this function to commit your changes into the
 actual file.
 The file descriptor will be closed, and the lockfile handled.
-:note: can be called multiple times"""
+
+**Note** can be called multiple times"""
 self._end_writing(successful=True)

 def rollback(self):
 """Abort your operation without any changes. The file descriptor will be
 closed, and the lock released.
-:note: can be called multiple times"""
+
+**Note** can be called multiple times"""
 self._end_writing(successful=False)

 def _end_writing(self, successful=True):
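
The commit and rollback docstrings above outline LockedFD's write cycle. A rough usage sketch, assuming open(write=True, stream=False) hands back a raw OS file descriptor (the path and data are illustrative):

import os
from gitdb.util import LockedFD

lfd = LockedFD("/tmp/some.file")
fd = lfd.open(write=True)        # acquires the lock file
try:
    os.write(fd, "new contents\n")
    lfd.commit()                 # closes the descriptor and puts the changes in place
except Exception:
    lfd.rollback()               # closes the descriptor and discards the changes
    raise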

setup.py

Lines changed: 5 additions & 4 deletions
@@ -4,6 +4,7 @@
 from distutils.command.build_ext import build_ext

 import os, sys
+import gitdb as meta

 # wow, this is a mixed bag ... I am pretty upset about all of this ...
 setuptools_build_py_module = None
@@ -69,11 +70,11 @@ def get_data_files(self):

 setup(cmdclass={'build_ext':build_ext_nofail},
 name = "gitdb",
-version = "0.5.3",
+version = meta.__version__,
 description = "Git Object Database",
-author = "Sebastian Thiel",
-author_email = "[email protected]",
-url = "http://gitorious.org/git-python/gitdb",
+author = meta.__author__,
+author_email = meta.__contact__,
+url = meta.__homepage__,
 packages = ('gitdb', 'gitdb.db', 'gitdb.test', 'gitdb.test.db', 'gitdb.test.performance'),
 package_data={ 'gitdb.test' : ['fixtures/packs/*', 'fixtures/objects/7b/*']},
 package_dir = {'gitdb':'gitdb'},
