Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/gitdb/pack.py: 21%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

465 statements  

1# Copyright (C) 2010, 2011 Sebastian Thiel (byronimo@gmail.com) and contributors 

2# 

3# This module is part of GitDB and is released under 

4# the New BSD License: https://opensource.org/license/bsd-3-clause/ 

5"""Contains PackIndexFile and PackFile implementations""" 

6import zlib 

7 

8from gitdb.exc import ( 

9 BadObject, 

10 AmbiguousObjectName, 

11 UnsupportedOperation, 

12 ParseError 

13) 

14 

15from gitdb.util import ( 

16 mman, 

17 LazyMixin, 

18 unpack_from, 

19 bin_to_hex, 

20 byte_ord, 

21) 

22 

23from gitdb.fun import ( 

24 create_pack_object_header, 

25 pack_object_header_info, 

26 is_equal_canonical_sha, 

27 type_id_to_type_map, 

28 write_object, 

29 stream_copy, 

30 chunk_size, 

31 delta_types, 

32 OFS_DELTA, 

33 REF_DELTA, 

34 msb_size 

35) 

36 

37try: 

38 from gitdb_speedups._perf import PackIndexFile_sha_to_index 

39except ImportError: 

40 pass 

41# END try c module 

42 

43from gitdb.base import ( # Amazing ! 

44 OInfo, 

45 OStream, 

46 OPackInfo, 

47 OPackStream, 

48 ODeltaStream, 

49 ODeltaPackInfo, 

50 ODeltaPackStream, 

51) 

52 

53from gitdb.stream import ( 

54 DecompressMemMapReader, 

55 DeltaApplyReader, 

56 Sha1Writer, 

57 NullStream, 

58 FlexibleSha1Writer 

59) 

60 

61from struct import pack 

62from binascii import crc32 

63 

64from gitdb.const import NULL_BYTE 

65 

66import tempfile 

67import array 

68import os 

69import sys 

70 

71__all__ = ('PackIndexFile', 'PackFile', 'PackEntity') 

72 

73 

74#{ Utilities 

75 

def pack_object_at(cursor, offset, as_stream):
    """Parse the pack entry which starts at ``offset`` within the mapped pack data.

    :return: Tuple(abs_data_offset, PackInfo|PackStream)
        an object of the correct type according to the type_id of the object.
        If as_stream is True, the object will contain a stream, allowing the
        data to be read decompressed.
    :param cursor: memory cursor over the pack data, providing random access
    :param offset: offset into the data at which the object information is located
    :param as_stream: if True, a stream object will be returned that can read
        the data, otherwise you receive an info object only"""
    data = cursor.use_region(offset).buffer()
    type_id, uncomp_size, data_rela_offset = pack_object_header_info(data)
    total_rela_offset = None                # set later, actual offset until data stream begins
    delta_info = None                       # stays None for non-delta (base) objects

    # OFFSET DELTA
    if type_id == OFS_DELTA:
        # The base offset is stored as a variable-length big-endian quantity:
        # 7 payload bits per byte, the high bit set on every byte but the last.
        i = data_rela_offset
        c = byte_ord(data[i])
        i += 1
        delta_offset = c & 0x7f
        while c & 0x80:
            c = byte_ord(data[i])
            i += 1
            # the +1 compensates for the offset-encoding used by the pack format
            delta_offset += 1
            delta_offset = (delta_offset << 7) + (c & 0x7f)
        # END character loop
        delta_info = delta_offset           # backward distance from `offset` to the base object
        total_rela_offset = i
    # REF DELTA
    elif type_id == REF_DELTA:
        # the 20 byte binary sha of the base object directly precedes the delta data
        total_rela_offset = data_rela_offset + 20
        delta_info = data[data_rela_offset:total_rela_offset]
    # BASE OBJECT
    else:
        # assume its a base object
        total_rela_offset = data_rela_offset
    # END handle type id
    abs_data_offset = offset + total_rela_offset
    if as_stream:
        stream = DecompressMemMapReader(data[total_rela_offset:], False, uncomp_size)
        if delta_info is None:
            return abs_data_offset, OPackStream(offset, type_id, uncomp_size, stream)
        else:
            return abs_data_offset, ODeltaPackStream(offset, type_id, uncomp_size, delta_info, stream)
    else:
        if delta_info is None:
            return abs_data_offset, OPackInfo(offset, type_id, uncomp_size)
        else:
            return abs_data_offset, ODeltaPackInfo(offset, type_id, uncomp_size, delta_info)
        # END handle info
    # END handle stream

128 

129 

def write_stream_to_pack(read, write, zstream, base_crc=None):
    """Compress everything produced by ``read`` and hand the result to ``write``.

    :param read: callable returning at most n bytes per call
    :param write: callable consuming the compressed chunks
    :param zstream: compressor object offering ``compress`` and ``flush``
    :param base_crc: if not None, it seeds a running crc32 computed over every
        compressed byte written. If None, no crc is generated.
    :return: tuple(bytes_read, bytes_written, crc32); the crc is 0 when
        base_crc was None"""
    bytes_read = 0
    bytes_written = 0
    track_crc = base_crc is not None
    running_crc = base_crc if track_crc else 0

    while True:
        chunk = read(chunk_size)
        bytes_read += len(chunk)
        out = zstream.compress(chunk)
        bytes_written += len(out)
        write(out)                      # cannot assume return value

        if track_crc:
            running_crc = crc32(out, running_crc)
        # END handle crc

        # a short read signals the end of the input stream
        if len(chunk) != chunk_size:
            break
    # END copy loop

    out = zstream.flush()
    bytes_written += len(out)
    write(out)
    if track_crc:
        running_crc = crc32(out, running_crc)
    # END handle crc

    return (bytes_read, bytes_written, running_crc)

168 

169 

170#} END utilities 

171 

172 

class IndexWriter:

    """Utility to cache index information, allowing to write all information later
    in one go to the given stream
    **Note:** currently only writes v2 indices"""
    __slots__ = '_objs'

    def __init__(self):
        self._objs = list()

    def append(self, binsha, crc, offset):
        """Append one piece of object information

        :param binsha: 20 byte binary sha of the object
        :param crc: crc32 over the object's compressed pack data
        :param offset: byte offset of the object within the pack"""
        self._objs.append((binsha, crc, offset))

    def write(self, pack_sha, write):
        """Write the index file using the given write method

        :param pack_sha: binary sha over the whole pack that we index
        :param write: callable consuming the produced index bytes
        :raise ValueError: if pack_sha is not a 20 byte binary sha
        :return: sha1 binary sha over all index file contents"""
        # Validate up-front, before anything is written: a short/long trailer
        # would corrupt the whole index. The previous `assert` ran only after
        # all data was emitted and was stripped entirely under `python -O`.
        if len(pack_sha) != 20:
            raise ValueError("pack_sha must be a 20 byte binary sha, got %i bytes" % len(pack_sha))

        # sort for sha1 hash
        self._objs.sort(key=lambda o: o[0])

        sha_writer = FlexibleSha1Writer(write)
        sha_write = sha_writer.write
        sha_write(PackIndexFile.index_v2_signature)
        sha_write(pack(">L", PackIndexFile.index_version_default))

        # fanout: entry i holds the number of objects whose first sha byte is <= i
        tmplist = list((0,) * 256)      # fanout or list with 64 bit offsets
        for t in self._objs:
            tmplist[byte_ord(t[0][0])] += 1
        # END prepare fanout
        for i in range(255):
            v = tmplist[i]
            sha_write(pack('>L', v))
            tmplist[i + 1] += v         # accumulate into a running total
        # END write each fanout entry
        sha_write(pack('>L', tmplist[255]))

        # sha1 ordered
        # save calls, that is push them into c
        sha_write(b''.join(t[0] for t in self._objs))

        # crc32
        for t in self._objs:
            sha_write(pack('>L', t[1] & 0xffffffff))
        # END for each crc

        tmplist = list()
        # offset 32
        for t in self._objs:
            ofs = t[2]
            if ofs > 0x7fffffff:
                # too large for 31 bits: park it in the 64 bit table and write
                # its table index with the high bit set instead
                tmplist.append(ofs)
                ofs = 0x80000000 + len(tmplist) - 1
            # END handle 64 bit offsets
            sha_write(pack('>L', ofs & 0xffffffff))
        # END for each offset

        # offset 64
        for ofs in tmplist:
            sha_write(pack(">Q", ofs))
        # END for each offset

        # trailer: the pack checksum, then the sha over the index contents
        sha_write(pack_sha)
        sha = sha_writer.sha(as_hex=False)
        write(sha)
        return sha

242 

243 

class PackIndexFile(LazyMixin):

    """A pack index provides offsets into the corresponding pack, allowing to find
    locations for offsets faster."""

    # Dont use slots as we dynamically bind functions for each version, need a dict for this
    # The slots you see here are just to keep track of our instance variables
    # __slots__ = ('_indexpath', '_fanout_table', '_cursor', '_version',
    #               '_sha_list_offset', '_crc_list_offset', '_pack_offset', '_pack_64_offset')

    # used in v2 indices
    _sha_list_offset = 8 + 1024
    index_v2_signature = b'\xfftOc'
    index_version_default = 2

    def __init__(self, indexpath):
        """:param indexpath: path to the .idx file - it is mapped lazily on first access"""
        super().__init__()
        self._indexpath = indexpath

    def close(self):
        """Drop all memory maps held on the index file (required on Windows
        before the file can be deleted or replaced)."""
        mman.force_map_handle_removal_win(self._indexpath)
        self._cursor = None

    def _set_cache_(self, attr):
        """LazyMixin hook: fill in the requested attribute on first access."""
        if attr == "_packfile_checksum":
            # the file trailer holds the pack checksum followed by the index checksum
            self._packfile_checksum = self._cursor.map()[-40:-20]
        elif attr == "_cursor":
            # Note: We don't lock the file when reading as we cannot be sure
            # that we can actually write to the location - it could be a read-only
            # alternate for instance
            self._cursor = mman.make_cursor(self._indexpath).use_region()
            # We will assume that the index will always fully fit into memory !
            if mman.window_size() > 0 and self._cursor.file_size() > mman.window_size():
                raise AssertionError("The index file at %s is too large to fit into a mapped window (%i > %i). This is a limitation of the implementation" % (
                    self._indexpath, self._cursor.file_size(), mman.window_size()))
            # END assert window size
        else:
            # now its time to initialize everything - if we are here, someone wants
            # to access the fanout table or related properties

            # CHECK VERSION
            mmap = self._cursor.map()
            self._version = 2 if mmap[:4] == self.index_v2_signature else 1
            if self._version == 2:
                version_id = unpack_from(">L", mmap, 4)[0]
                # raise explicitly instead of asserting so the check survives `python -O`
                if version_id != self._version:
                    raise AssertionError("Unsupported index version: %i" % version_id)
            # END assert version

            # SETUP FUNCTIONS
            # setup our functions according to the actual version
            for fname in ('entry', 'offset', 'sha', 'crc'):
                setattr(self, fname, getattr(self, "_%s_v%i" % (fname, self._version)))
            # END for each function to initialize

            # INITIALIZE DATA
            # byte offset is 8 if version is 2, 0 otherwise
            self._initialize()
        # END handle attributes

    #{ Access V1

    def _entry_v1(self, i):
        """:return: tuple(offset, binsha, 0) - v1 indices do not store a crc"""
        return unpack_from(">L20s", self._cursor.map(), 1024 + i * 24) + (0, )

    def _offset_v1(self, i):
        """see ``_offset_v2``"""
        return unpack_from(">L", self._cursor.map(), 1024 + i * 24)[0]

    def _sha_v1(self, i):
        """see ``_sha_v2``"""
        base = 1024 + (i * 24) + 4
        return self._cursor.map()[base:base + 20]

    def _crc_v1(self, i):
        """unsupported - v1 indices carry no crc list, always returns 0"""
        return 0

    #} END access V1

    #{ Access V2
    def _entry_v2(self, i):
        """:return: tuple(offset, binsha, crc)"""
        return (self._offset_v2(i), self._sha_v2(i), self._crc_v2(i))

    def _offset_v2(self, i):
        """:return: 32 or 64 byte offset into pack files. 64 byte offsets will only
            be returned if the pack is larger than 4 GiB, or 2^32"""
        offset = unpack_from(">L", self._cursor.map(), self._pack_offset + i * 4)[0]

        # if the high-bit is set, this indicates that we have to lookup the offset
        # in the 64 bit region of the file. The current offset ( lower 31 bits )
        # are the index into it
        if offset & 0x80000000:
            offset = unpack_from(">Q", self._cursor.map(), self._pack_64_offset + (offset & ~0x80000000) * 8)[0]
        # END handle 64 bit offset

        return offset

    def _sha_v2(self, i):
        """:return: sha at the given index of this file index instance"""
        base = self._sha_list_offset + i * 20
        return self._cursor.map()[base:base + 20]

    def _crc_v2(self, i):
        """:return: 4 bytes crc for the object at index i"""
        return unpack_from(">L", self._cursor.map(), self._crc_list_offset + i * 4)[0]

    #} END access V2

    #{ Initialization

    def _initialize(self):
        """initialize base data: fanout table and the v2 section offsets"""
        self._fanout_table = self._read_fanout((self._version == 2) * 8)

        if self._version == 2:
            # v2 layout: header | fanout | shas | crcs | 32 bit offsets | 64 bit offsets
            self._crc_list_offset = self._sha_list_offset + self.size() * 20
            self._pack_offset = self._crc_list_offset + self.size() * 4
            self._pack_64_offset = self._pack_offset + self.size() * 4
        # END setup base

    def _read_fanout(self, byte_offset):
        """Generate a fanout table from our data

        :param byte_offset: offset of the 256 big-endian uint32 fanout entries
            (8 for v2 indices, 0 for v1)"""
        d = self._cursor.map()
        out = list()
        append = out.append
        for i in range(256):
            append(unpack_from('>L', d, byte_offset + i * 4)[0])
        # END for each entry
        return out

    #} END initialization

    #{ Properties
    def version(self):
        """:return: index file format version, 1 or 2"""
        return self._version

    def size(self):
        """:return: amount of objects referred to by this index"""
        return self._fanout_table[255]

    def path(self):
        """:return: path to the packindexfile"""
        return self._indexpath

    def packfile_checksum(self):
        """:return: 20 byte sha representing the sha1 hash of the pack file"""
        return self._cursor.map()[-40:-20]

    def indexfile_checksum(self):
        """:return: 20 byte sha representing the sha1 hash of this index file"""
        return self._cursor.map()[-20:]

    def offsets(self):
        """:return: sequence of all offsets in the order in which they were written

        **Note:** return value can be random accessed, but may be immmutable"""
        if self._version == 2:
            # read stream to array, convert to tuple
            a = array.array('I')    # 4 byte unsigned int, long are 8 byte on 64 bit it appears
            a.frombytes(self._cursor.map()[self._pack_offset:self._pack_64_offset])

            # networkbyteorder to something array likes more
            if sys.byteorder == 'little':
                a.byteswap()
            return a
        else:
            return tuple(self.offset(index) for index in range(self.size()))
        # END handle version

    def sha_to_index(self, sha):
        """
        :return: index usable with the ``offset`` or ``entry`` method, or None
            if the sha was not found in this pack index
        :param sha: 20 byte sha to lookup"""
        first_byte = byte_ord(sha[0])
        get_sha = self.sha
        lo = 0                  # lower index, the left bound of the bisection
        if first_byte != 0:
            lo = self._fanout_table[first_byte - 1]
        hi = self._fanout_table[first_byte]     # the upper, right bound of the bisection

        # bisect until we have the sha
        while lo < hi:
            mid = (lo + hi) // 2
            mid_sha = get_sha(mid)
            if sha < mid_sha:
                hi = mid
            elif sha == mid_sha:
                return mid
            else:
                lo = mid + 1
            # END handle midpoint
        # END bisect
        return None

    def partial_sha_to_index(self, partial_bin_sha, canonical_length):
        """
        :return: index as in `sha_to_index` or None if the sha was not found in this
            index file
        :param partial_bin_sha: an at least two bytes of a partial binary sha as bytes
        :param canonical_length: length of the original hexadecimal representation of the
            given partial binary sha
        :raise AmbiguousObjectName: if more than one object in this index matches
            the partial sha"""
        if len(partial_bin_sha) < 2:
            raise ValueError("Require at least 2 bytes of partial sha")

        assert isinstance(partial_bin_sha, bytes), "partial_bin_sha must be bytes"
        first_byte = byte_ord(partial_bin_sha[0])

        get_sha = self.sha
        lo = 0                  # lower index, the left bound of the bisection
        if first_byte != 0:
            lo = self._fanout_table[first_byte - 1]
        hi = self._fanout_table[first_byte]     # the upper, right bound of the bisection

        # fill the partial to full 20 bytes
        filled_sha = partial_bin_sha + NULL_BYTE * (20 - len(partial_bin_sha))

        # find lowest
        while lo < hi:
            mid = (lo + hi) // 2
            mid_sha = get_sha(mid)
            if filled_sha < mid_sha:
                hi = mid
            elif filled_sha == mid_sha:
                # perfect match
                lo = mid
                break
            else:
                lo = mid + 1
            # END handle midpoint
        # END bisect

        if lo < self.size():
            cur_sha = get_sha(lo)
            if is_equal_canonical_sha(canonical_length, partial_bin_sha, cur_sha):
                next_sha = None
                if lo + 1 < self.size():
                    next_sha = get_sha(lo + 1)
                # Ambiguity means the NEXT (sorted) entry also matches the given
                # prefix. The previous check compared the full 20 byte shas of
                # two distinct index entries - these can never be equal, so
                # ambiguous prefixes silently resolved to the lower entry.
                if next_sha is not None and is_equal_canonical_sha(canonical_length, partial_bin_sha, next_sha):
                    raise AmbiguousObjectName(partial_bin_sha)
                return lo
            # END if we have a match
        # END if we found something
        return None

    if 'PackIndexFile_sha_to_index' in globals():
        # NOTE: Its just about 25% faster, the major bottleneck might be the attr
        # accesses
        def sha_to_index(self, sha):
            return PackIndexFile_sha_to_index(self, sha)
    # END redefine heavy-hitter with c version

    #} END properties

500 

501 

class PackFile(LazyMixin):

    """A pack is a file written according to the Version 2 for git packs

    As we currently use memory maps, it could be assumed that the maximum size of
    packs therefore is 32 bit on 32 bit systems. On 64 bit systems, this should be
    fine though.

    **Note:** at some point, this might be implemented using streams as well, or
    streams are an alternate path in the case memory maps cannot be created
    for some reason - one clearly doesn't want to read 10GB at once in that
    case"""

    __slots__ = ('_packpath', '_cursor', '_size', '_version')
    pack_signature = 0x5041434b         # 'PACK' as big-endian ASCII
    pack_version_default = 2

    # offset into our data at which the first object starts
    first_object_offset = 3 * 4         # header bytes: signature, version, object count
    footer_size = 20                    # final sha

    def __init__(self, packpath):
        """:param packpath: path to the .pack file - it is mapped lazily on first access"""
        self._packpath = packpath

    def close(self):
        """Drop all memory maps held on the pack file (required on Windows
        before the file can be deleted or replaced)."""
        mman.force_map_handle_removal_win(self._packpath)
        self._cursor = None

    def _set_cache_(self, attr):
        """LazyMixin hook: map the pack and parse its 12 byte header."""
        # we fill the whole cache, whichever attribute gets queried first
        self._cursor = mman.make_cursor(self._packpath).use_region()

        # read the header information
        type_id, self._version, self._size = unpack_from(">LLL", self._cursor.map(), 0)

        # TODO: figure out whether we should better keep the lock, or maybe
        # add a .keep file instead ?
        if type_id != self.pack_signature:
            raise ParseError("Invalid pack signature: %i" % type_id)

    def _iter_objects(self, start_offset, as_stream=True):
        """Handle the actual iteration of objects within this pack

        :param start_offset: byte offset of the first object to yield; a falsy
            value starts at ``first_object_offset``
        :param as_stream: if True, each yielded stream is rewound to its start"""
        c = self._cursor
        content_size = c.file_size() - self.footer_size
        cur_offset = start_offset or self.first_object_offset

        null = NullStream()
        while cur_offset < content_size:
            data_offset, ostream = pack_object_at(c, cur_offset, True)
            # scrub the stream to the end - this decompresses the object, but yields
            # the amount of compressed bytes we need to get to the next offset

            stream_copy(ostream.read, null.write, ostream.size, chunk_size)
            assert ostream.stream._br == ostream.size
            # next object starts after this object's header and compressed data
            cur_offset += (data_offset - ostream.pack_offset) + ostream.stream.compressed_bytes_read()

            # if a stream is requested, reset it beforehand
            # Otherwise return the Stream object directly, its derived from the
            # info object
            if as_stream:
                ostream.stream.seek(0)
            yield ostream
        # END until we have read everything

    #{ Pack Information

    def size(self):
        """:return: The amount of objects stored in this pack"""
        return self._size

    def version(self):
        """:return: the version of this pack"""
        return self._version

    def data(self):
        """
        :return: read-only data of this pack. It provides random access and usually
            is a memory map.
        :note: This method is unsafe as it returns a window into a file which might be larger than than the actual window size"""
        # can use map as we are starting at offset 0. Otherwise we would have to use buffer()
        return self._cursor.use_region().map()

    def checksum(self):
        """:return: 20 byte sha1 hash on all object sha's contained in this file"""
        # the checksum occupies the final footer_size bytes of the file
        return self._cursor.use_region(self._cursor.file_size() - 20).buffer()[:]

    def path(self):
        """:return: path to the packfile"""
        return self._packpath
    #} END pack information

    #{ Pack Specific

    def collect_streams(self, offset):
        """
        :return: list of pack streams which are required to build the object
            at the given offset. The first entry of the list is the object at offset,
            the last one is either a full object, or a REF_Delta stream. The latter
            type needs its reference object to be locked up in an ODB to form a valid
            delta chain.
            If the object at offset is no delta, the size of the list is 1.
        :param offset: specifies the first byte of the object within this pack"""
        out = list()
        c = self._cursor
        while True:
            ostream = pack_object_at(c, offset, True)[1]
            out.append(ostream)
            if ostream.type_id == OFS_DELTA:
                # delta_info of an OFS delta is the backward distance to its base
                offset = ostream.pack_offset - ostream.delta_info
            else:
                # the only thing we can lookup are OFFSET deltas. Everything
                # else is either an object, or a ref delta, in the latter
                # case someone else has to find it
                break
            # END handle type
        # END while chaining streams
        return out

    #} END pack specific

    #{ Read-Database like Interface

    def info(self, offset):
        """Retrieve information about the object at the given file-absolute offset

        :param offset: byte offset
        :return: OPackInfo instance, the actual type differs depending on the type_id attribute"""
        return pack_object_at(self._cursor, offset or self.first_object_offset, False)[1]

    def stream(self, offset):
        """Retrieve an object at the given file-relative offset as stream along with its information

        :param offset: byte offset
        :return: OPackStream instance, the actual type differs depending on the type_id attribute"""
        return pack_object_at(self._cursor, offset or self.first_object_offset, True)[1]

    def stream_iter(self, start_offset=0):
        """
        :return: iterator yielding OPackStream compatible instances, allowing
            to access the data in the pack directly.
        :param start_offset: offset to the first object to iterate. If 0, iteration
            starts at the very first object in the pack.

        **Note:** Iterating a pack directly is costly as the datastream has to be decompressed
            to determine the bounds between the objects"""
        return self._iter_objects(start_offset, as_stream=True)

    #} END Read-Database like Interface

650 

651 

652class PackEntity(LazyMixin): 

653 

654 """Combines the PackIndexFile and the PackFile into one, allowing the 

655 actual objects to be resolved and iterated""" 

656 

657 __slots__ = ('_index', # our index file 

658 '_pack', # our pack file 

659 '_offset_map' # on demand dict mapping one offset to the next consecutive one 

660 ) 

661 

662 IndexFileCls = PackIndexFile 

663 PackFileCls = PackFile 

664 

665 def __init__(self, pack_or_index_path): 

666 """Initialize ourselves with the path to the respective pack or index file""" 

667 basename, ext = os.path.splitext(pack_or_index_path) 

668 self._index = self.IndexFileCls("%s.idx" % basename) # PackIndexFile instance 

669 self._pack = self.PackFileCls("%s.pack" % basename) # corresponding PackFile instance 

670 

    def close(self):
        """Release the memory maps of both the index and the pack file so the
        underlying files can be deleted or replaced (relevant on Windows)."""
        self._index.close()
        self._pack.close()

674 

675 def _set_cache_(self, attr): 

676 # currently this can only be _offset_map 

677 # TODO: make this a simple sorted offset array which can be bisected 

678 # to find the respective entry, from which we can take a +1 easily 

679 # This might be slower, but should also be much lighter in memory ! 

680 offsets_sorted = sorted(self._index.offsets()) 

681 last_offset = len(self._pack.data()) - self._pack.footer_size 

682 assert offsets_sorted, "Cannot handle empty indices" 

683 

684 offset_map = None 

685 if len(offsets_sorted) == 1: 

686 offset_map = {offsets_sorted[0]: last_offset} 

687 else: 

688 iter_offsets = iter(offsets_sorted) 

689 iter_offsets_plus_one = iter(offsets_sorted) 

690 next(iter_offsets_plus_one) 

691 consecutive = zip(iter_offsets, iter_offsets_plus_one) 

692 

693 offset_map = dict(consecutive) 

694 

695 # the last offset is not yet set 

696 offset_map[offsets_sorted[-1]] = last_offset 

697 # END handle offset amount 

698 self._offset_map = offset_map 

699 

700 def _sha_to_index(self, sha): 

701 """:return: index for the given sha, or raise""" 

702 index = self._index.sha_to_index(sha) 

703 if index is None: 

704 raise BadObject(sha) 

705 return index 

706 

707 def _iter_objects(self, as_stream): 

708 """Iterate over all objects in our index and yield their OInfo or OStream instences""" 

709 _sha = self._index.sha 

710 _object = self._object 

711 for index in range(self._index.size()): 

712 yield _object(_sha(index), as_stream, index) 

713 # END for each index 

714 

    def _object(self, sha, as_stream, index=-1):
        """:return: OInfo or OStream object providing information about the given sha
        :param sha: 20 byte binary sha, may be None if ``index`` is given
        :param as_stream: if True, return a readable stream object, else an info object
        :param index: if not -1, its assumed to be the sha's index in the IndexFile"""
        # its a little bit redundant here, but it needs to be efficient
        if index < 0:
            index = self._sha_to_index(sha)
        if sha is None:
            sha = self._index.sha(index)
        # END assure sha is present ( in output )
        offset = self._index.offset(index)
        type_id, uncomp_size, data_rela_offset = pack_object_header_info(self._pack._cursor.use_region(offset).buffer())
        if as_stream:
            if type_id not in delta_types:
                packstream = self._pack.stream(offset)
                return OStream(sha, packstream.type, packstream.size, packstream.stream)
            # END handle non-deltas

            # produce a delta stream containing all info
            # To prevent it from applying the deltas when querying the size,
            # we extract it from the delta stream ourselves
            streams = self.collect_streams_at_offset(offset)
            dstream = DeltaApplyReader.new(streams)

            # size is passed as None: computing it would force delta application
            return ODeltaStream(sha, dstream.type, None, dstream)
        else:
            if type_id not in delta_types:
                return OInfo(sha, type_id_to_type_map[type_id], uncomp_size)
            # END handle non-deltas

            # deltas are a little tougher - unpack the first bytes to obtain
            # the actual target size, as opposed to the size of the delta data
            streams = self.collect_streams_at_offset(offset)
            buf = streams[0].read(512)
            # delta header: source size, then target size, both msb-encoded
            offset, src_size = msb_size(buf)
            offset, target_size = msb_size(buf, offset)

            # collect the streams to obtain the actual object type
            if streams[-1].type_id in delta_types:
                raise BadObject(sha, "Could not resolve delta object")
            return OInfo(sha, streams[-1].type, target_size)
        # END handle stream

756 

757 #{ Read-Database like Interface 

758 

759 def info(self, sha): 

760 """Retrieve information about the object identified by the given sha 

761 

762 :param sha: 20 byte sha1 

763 :raise BadObject: 

764 :return: OInfo instance, with 20 byte sha""" 

765 return self._object(sha, False) 

766 

767 def stream(self, sha): 

768 """Retrieve an object stream along with its information as identified by the given sha 

769 

770 :param sha: 20 byte sha1 

771 :raise BadObject: 

772 :return: OStream instance, with 20 byte sha""" 

773 return self._object(sha, True) 

774 

775 def info_at_index(self, index): 

776 """As ``info``, but uses a PackIndexFile compatible index to refer to the object""" 

777 return self._object(None, False, index) 

778 

779 def stream_at_index(self, index): 

780 """As ``stream``, but uses a PackIndexFile compatible index to refer to the 

781 object""" 

782 return self._object(None, True, index) 

783 

784 #} END Read-Database like Interface 

785 

786 #{ Interface 

787 

    def pack(self):
        """:return: the underlying pack file instance (``PackFileCls``, a PackFile by default)"""
        return self._pack

791 

    def index(self):
        """:return: the underlying pack index file instance (``IndexFileCls``, a PackIndexFile by default)"""
        return self._index

795 

796 def is_valid_stream(self, sha, use_crc=False): 

797 """ 

798 Verify that the stream at the given sha is valid. 

799 

800 :param use_crc: if True, the index' crc is run over the compressed stream of 

801 the object, which is much faster than checking the sha1. It is also 

802 more prone to unnoticed corruption or manipulation. 

803 :param sha: 20 byte sha1 of the object whose stream to verify 

804 whether the compressed stream of the object is valid. If it is 

805 a delta, this only verifies that the delta's data is valid, not the 

806 data of the actual undeltified object, as it depends on more than 

807 just this stream. 

808 If False, the object will be decompressed and the sha generated. It must 

809 match the given sha 

810 

811 :return: True if the stream is valid 

812 :raise UnsupportedOperation: If the index is version 1 only 

813 :raise BadObject: sha was not found""" 

814 if use_crc: 

815 if self._index.version() < 2: 

816 raise UnsupportedOperation("Version 1 indices do not contain crc's, verify by sha instead") 

817 # END handle index version 

818 

819 index = self._sha_to_index(sha) 

820 offset = self._index.offset(index) 

821 next_offset = self._offset_map[offset] 

822 crc_value = self._index.crc(index) 

823 

824 # create the current crc value, on the compressed object data 

825 # Read it in chunks, without copying the data 

826 crc_update = zlib.crc32 

827 pack_data = self._pack.data() 

828 cur_pos = offset 

829 this_crc_value = 0 

830 while cur_pos < next_offset: 

831 rbound = min(cur_pos + chunk_size, next_offset) 

832 size = rbound - cur_pos 

833 this_crc_value = crc_update(pack_data[cur_pos:cur_pos + size], this_crc_value) 

834 cur_pos += size 

835 # END window size loop 

836 

837 # crc returns signed 32 bit numbers, the AND op forces it into unsigned 

838 # mode ... wow, sneaky, from dulwich. 

839 return (this_crc_value & 0xffffffff) == crc_value 

840 else: 

841 shawriter = Sha1Writer() 

842 stream = self._object(sha, as_stream=True) 

843 # write a loose object, which is the basis for the sha 

844 write_object(stream.type, stream.size, stream.read, shawriter.write) 

845 

846 assert shawriter.sha(as_hex=False) == sha 

847 return shawriter.sha(as_hex=False) == sha 

848 # END handle crc/sha verification 

849 

850 def info_iter(self): 

851 """ 

852 :return: Iterator over all objects in this pack. The iterator yields 

853 OInfo instances""" 

854 return self._iter_objects(as_stream=False) 

855 

856 def stream_iter(self): 

857 """ 

858 :return: iterator over all objects in this pack. The iterator yields 

859 OStream instances""" 

860 return self._iter_objects(as_stream=True) 

861 

862 def collect_streams_at_offset(self, offset): 

863 """ 

864 As the version in the PackFile, but can resolve REF deltas within this pack 

865 For more info, see ``collect_streams`` 

866 

867 :param offset: offset into the pack file at which the object can be found""" 

868 streams = self._pack.collect_streams(offset) 

869 

870 # try to resolve the last one if needed. It is assumed to be either 

871 # a REF delta, or a base object, as OFFSET deltas are resolved by the pack 

872 if streams[-1].type_id == REF_DELTA: 

873 stream = streams[-1] 

874 while stream.type_id in delta_types: 

875 if stream.type_id == REF_DELTA: 

876 # smmap can return memory view objects, which can't be compared as buffers/bytes can ... 

877 if isinstance(stream.delta_info, memoryview): 

878 sindex = self._index.sha_to_index(stream.delta_info.tobytes()) 

879 else: 

880 sindex = self._index.sha_to_index(stream.delta_info) 

881 if sindex is None: 

882 break 

883 stream = self._pack.stream(self._index.offset(sindex)) 

884 streams.append(stream) 

885 else: 

886 # must be another OFS DELTA - this could happen if a REF 

887 # delta we resolve previously points to an OFS delta. Who 

888 # would do that ;) ? We can handle it though 

889 stream = self._pack.stream(stream.delta_info) 

890 streams.append(stream) 

891 # END handle ref delta 

892 # END resolve ref streams 

893 # END resolve streams 

894 

895 return streams 

896 

897 def collect_streams(self, sha): 

898 """ 

899 As ``PackFile.collect_streams``, but takes a sha instead of an offset. 

900 Additionally, ref_delta streams will be resolved within this pack. 

901 If this is not possible, the stream will be left alone, hence it is adivsed 

902 to check for unresolved ref-deltas and resolve them before attempting to 

903 construct a delta stream. 

904 

905 :param sha: 20 byte sha1 specifying the object whose related streams you want to collect 

906 :return: list of streams, first being the actual object delta, the last being 

907 a possibly unresolved base object. 

908 :raise BadObject:""" 

909 return self.collect_streams_at_offset(self._index.offset(self._sha_to_index(sha))) 

910 

911 @classmethod 

912 def write_pack(cls, object_iter, pack_write, index_write=None, 

913 object_count=None, zlib_compression=zlib.Z_BEST_SPEED): 

914 """ 

915 Create a new pack by putting all objects obtained by the object_iterator 

916 into a pack which is written using the pack_write method. 

917 The respective index is produced as well if index_write is not Non. 

918 

919 :param object_iter: iterator yielding odb output objects 

920 :param pack_write: function to receive strings to write into the pack stream 

921 :param indx_write: if not None, the function writes the index file corresponding 

922 to the pack. 

923 :param object_count: if you can provide the amount of objects in your iteration, 

924 this would be the place to put it. Otherwise we have to pre-iterate and store 

925 all items into a list to get the number, which uses more memory than necessary. 

926 :param zlib_compression: the zlib compression level to use 

927 :return: tuple(pack_sha, index_binsha) binary sha over all the contents of the pack 

928 and over all contents of the index. If index_write was None, index_binsha will be None 

929 

930 **Note:** The destination of the write functions is up to the user. It could 

931 be a socket, or a file for instance 

932 

933 **Note:** writes only undeltified objects""" 

934 objs = object_iter 

935 if not object_count: 

936 if not isinstance(object_iter, (tuple, list)): 

937 objs = list(object_iter) 

938 # END handle list type 

939 object_count = len(objs) 

940 # END handle object 

941 

942 pack_writer = FlexibleSha1Writer(pack_write) 

943 pwrite = pack_writer.write 

944 ofs = 0 # current offset into the pack file 

945 index = None 

946 wants_index = index_write is not None 

947 

948 # write header 

949 pwrite(pack('>LLL', PackFile.pack_signature, PackFile.pack_version_default, object_count)) 

950 ofs += 12 

951 

952 if wants_index: 

953 index = IndexWriter() 

954 # END handle index header 

955 

956 actual_count = 0 

957 for obj in objs: 

958 actual_count += 1 

959 crc = 0 

960 

961 # object header 

962 hdr = create_pack_object_header(obj.type_id, obj.size) 

963 if index_write: 

964 crc = crc32(hdr) 

965 else: 

966 crc = None 

967 # END handle crc 

968 pwrite(hdr) 

969 

970 # data stream 

971 zstream = zlib.compressobj(zlib_compression) 

972 ostream = obj.stream 

973 br, bw, crc = write_stream_to_pack(ostream.read, pwrite, zstream, base_crc=crc) 

974 assert(br == obj.size) 

975 if wants_index: 

976 index.append(obj.binsha, crc, ofs) 

977 # END handle index 

978 

979 ofs += len(hdr) + bw 

980 if actual_count == object_count: 

981 break 

982 # END abort once we are done 

983 # END for each object 

984 

985 if actual_count != object_count: 

986 raise ValueError( 

987 "Expected to write %i objects into pack, but received only %i from iterators" % (object_count, actual_count)) 

988 # END count assertion 

989 

990 # write footer 

991 pack_sha = pack_writer.sha(as_hex=False) 

992 assert len(pack_sha) == 20 

993 pack_write(pack_sha) 

994 ofs += len(pack_sha) # just for completeness ;) 

995 

996 index_sha = None 

997 if wants_index: 

998 index_sha = index.write(pack_sha, index_write) 

999 # END handle index 

1000 

1001 return pack_sha, index_sha 

1002 

1003 @classmethod 

1004 def create(cls, object_iter, base_dir, object_count=None, zlib_compression=zlib.Z_BEST_SPEED): 

1005 """Create a new on-disk entity comprised of a properly named pack file and a properly named 

1006 and corresponding index file. The pack contains all OStream objects contained in object iter. 

1007 :param base_dir: directory which is to contain the files 

1008 :return: PackEntity instance initialized with the new pack 

1009 

1010 **Note:** for more information on the other parameters see the write_pack method""" 

1011 pack_fd, pack_path = tempfile.mkstemp('', 'pack', base_dir) 

1012 index_fd, index_path = tempfile.mkstemp('', 'index', base_dir) 

1013 pack_write = lambda d: os.write(pack_fd, d) 

1014 index_write = lambda d: os.write(index_fd, d) 

1015 

1016 pack_binsha, index_binsha = cls.write_pack(object_iter, pack_write, index_write, object_count, zlib_compression) 

1017 os.close(pack_fd) 

1018 os.close(index_fd) 

1019 

1020 fmt = "pack-%s.%s" 

1021 new_pack_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'pack')) 

1022 new_index_path = os.path.join(base_dir, fmt % (bin_to_hex(pack_binsha), 'idx')) 

1023 os.rename(pack_path, new_pack_path) 

1024 os.rename(index_path, new_index_path) 

1025 

1026 return cls(new_pack_path) 

1027 

1028 #} END interface