/src/mozilla-central/toolkit/components/url-classifier/HashStore.cpp
Line | Count | Source |
1 | | //* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | // Originally based on Chrome sources: |
3 | | // Copyright (c) 2010 The Chromium Authors. All rights reserved. |
4 | | // |
5 | | // Redistribution and use in source and binary forms, with or without |
6 | | // modification, are permitted provided that the following conditions are |
7 | | // met: |
8 | | // |
9 | | // * Redistributions of source code must retain the above copyright |
10 | | // notice, this list of conditions and the following disclaimer. |
11 | | // * Redistributions in binary form must reproduce the above |
12 | | // copyright notice, this list of conditions and the following disclaimer |
13 | | // in the documentation and/or other materials provided with the |
14 | | // distribution. |
15 | | // * Neither the name of Google Inc. nor the names of its |
16 | | // contributors may be used to endorse or promote products derived from |
17 | | // this software without specific prior written permission. |
18 | | // |
19 | | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
20 | | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
21 | | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
22 | | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
23 | | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
24 | | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
25 | | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
26 | | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
27 | | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
28 | | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
29 | | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
30 | | |
31 | | |
32 | | #include "HashStore.h" |
33 | | #include "nsICryptoHash.h" |
34 | | #include "nsISeekableStream.h" |
35 | | #include "nsIStreamConverterService.h" |
36 | | #include "nsNetUtil.h" |
37 | | #include "nsCheckSummedOutputStream.h" |
38 | | #include "prio.h" |
39 | | #include "mozilla/Logging.h" |
40 | | #include "mozilla/IntegerPrintfMacros.h" |
41 | | #include "zlib.h" |
42 | | #include "Classifier.h" |
43 | | #include "nsUrlClassifierDBService.h" |
44 | | #include "mozilla/Telemetry.h" |
45 | | |
46 | | // Main store for SafeBrowsing protocol data. We store |
47 | | // known add/sub chunks, prefixes and completions in memory |
48 | | // during an update, and serialize to disk. |
49 | | // We do not store the add prefixes; those are retrieved by |
50 | | // decompressing the PrefixSet cache whenever we need to apply |
51 | | // an update. |
52 | | // |
53 | | // byte slicing: Many of the 4-byte values stored here are strongly |
54 | | // correlated in the upper bytes, and uncorrelated in the lower |
55 | | // bytes. Because zlib/DEFLATE requires match lengths of at least |
56 | | // 3 to achieve good compression, and we don't get those if only |
57 | | // the upper 16-bits are correlated, it is worthwhile to slice 32-bit |
58 | | // values into 4 1-byte slices and compress the slices individually. |
59 | | // The slices corresponding to MSBs will compress very well, and the |
60 | | // slice corresponding to LSB almost nothing. Because of this, we |
61 | | // only apply DEFLATE to the 3 most significant bytes, and store the |
62 | | // LSB uncompressed. |
63 | | // |
64 | | // byte sliced (numValues) data format: |
65 | | // uint32_t compressed-size |
66 | | // compressed-size bytes zlib DEFLATE data |
67 | | // 0...numValues byte MSB of 4-byte numValues data |
68 | | // uint32_t compressed-size |
69 | | // compressed-size bytes zlib DEFLATE data |
70 | | // 0...numValues byte 2nd byte of 4-byte numValues data |
71 | | // uint32_t compressed-size |
72 | | // compressed-size bytes zlib DEFLATE data |
73 | | // 0...numValues byte 3rd byte of 4-byte numValues data |
74 | | // 0...numValues byte LSB of 4-byte numValues data |
75 | | // |
76 | | // Store data format: |
77 | | // uint32_t magic |
78 | | // uint32_t version |
79 | | // uint32_t numAddChunks |
80 | | // uint32_t numSubChunks |
81 | | // uint32_t numAddPrefixes |
82 | | // uint32_t numSubPrefixes |
83 | | // uint32_t numAddCompletes |
84 | | // uint32_t numSubCompletes |
85 | | // 0...numAddChunks uint32_t addChunk |
86 | | // 0...numSubChunks uint32_t subChunk |
87 | | // byte sliced (numAddPrefixes) uint32_t add chunk of AddPrefixes |
88 | | // byte sliced (numSubPrefixes) uint32_t add chunk of SubPrefixes |
89 | | // byte sliced (numSubPrefixes) uint32_t sub chunk of SubPrefixes |
90 | | // byte sliced (numSubPrefixes) uint32_t SubPrefixes |
91 | | // 0...numAddCompletes 32-byte Completions + uint32_t addChunk |
92 | | // 0...numSubCompletes 32-byte Completions + uint32_t addChunk |
93 | | // + uint32_t subChunk |
94 | | // 16-byte MD5 of all preceding data |
95 | | |
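A minimal, self-contained sketch of the byte-slicing scheme described above (not part of HashStore.cpp): SliceBytes and JoinBytes are hypothetical names used only for illustration, and the zlib DEFLATE step that ByteSliceWrite/ByteSliceRead below apply to the three upper slices is omitted here.

// Illustrative sketch only; not part of HashStore.cpp.
#include <cassert>
#include <cstdint>
#include <vector>

// Split 32-bit values into four per-byte planes, MSB first. The upper
// planes contain long runs of near-identical bytes (chunk numbers are
// clustered), so they compress well; the LSB plane is close to random
// and is stored uncompressed in the real store.
static void SliceBytes(const std::vector<uint32_t>& aValues,
                       std::vector<uint8_t> (&aSlices)[4])
{
  for (uint32_t v : aValues) {
    aSlices[0].push_back(v >> 24);
    aSlices[1].push_back((v >> 16) & 0xFF);
    aSlices[2].push_back((v >> 8) & 0xFF);
    aSlices[3].push_back(v & 0xFF);
  }
}

// Reassemble the original 32-bit values from the four planes.
static std::vector<uint32_t> JoinBytes(const std::vector<uint8_t> (&aSlices)[4])
{
  std::vector<uint32_t> values;
  for (size_t i = 0; i < aSlices[0].size(); i++) {
    values.push_back((uint32_t(aSlices[0][i]) << 24) |
                     (uint32_t(aSlices[1][i]) << 16) |
                     (uint32_t(aSlices[2][i]) << 8) |
                     uint32_t(aSlices[3][i]));
  }
  return values;
}

int main()
{
  // Hypothetical clustered chunk numbers: the two upper slices are
  // constant, so DEFLATE would shrink them to almost nothing.
  std::vector<uint32_t> chunkNumbers = { 0x00012F10, 0x00012F11, 0x00012F42 };
  std::vector<uint8_t> slices[4];
  SliceBytes(chunkNumbers, slices);
  // The split/join round-trips losslessly; ByteSliceWrite/ByteSliceRead
  // below perform the same split, plus compression of the upper slices.
  assert(JoinBytes(slices) == chunkNumbers);
  return 0;
}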
96 | | // Name of the SafeBrowsing store |
97 | | #define STORE_SUFFIX ".sbstore" |
98 | | |
99 | | // MOZ_LOG=UrlClassifierDbService:5 |
100 | | extern mozilla::LazyLogModule gUrlClassifierDbServiceLog; |
101 | 0 | #define LOG(args) MOZ_LOG(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug, args) |
102 | 0 | #define LOG_ENABLED() MOZ_LOG_TEST(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug) |
103 | | |
104 | | namespace mozilla { |
105 | | namespace safebrowsing { |
106 | | |
107 | | const uint32_t STORE_MAGIC = 0x1231af3b; |
108 | | const uint32_t CURRENT_VERSION = 3; |
109 | | |
110 | | nsresult |
111 | | TableUpdateV2::NewAddPrefix(uint32_t aAddChunk, const Prefix& aHash) |
112 | 0 | { |
113 | 0 | AddPrefix *add = mAddPrefixes.AppendElement(fallible); |
114 | 0 | if (!add) return NS_ERROR_OUT_OF_MEMORY; |
115 | 0 | add->addChunk = aAddChunk; |
116 | 0 | add->prefix = aHash; |
117 | 0 | return NS_OK; |
118 | 0 | } |
119 | | |
120 | | nsresult |
121 | | TableUpdateV2::NewSubPrefix(uint32_t aAddChunk, const Prefix& aHash, uint32_t aSubChunk) |
122 | 0 | { |
123 | 0 | SubPrefix *sub = mSubPrefixes.AppendElement(fallible); |
124 | 0 | if (!sub) return NS_ERROR_OUT_OF_MEMORY; |
125 | 0 | sub->addChunk = aAddChunk; |
126 | 0 | sub->prefix = aHash; |
127 | 0 | sub->subChunk = aSubChunk; |
128 | 0 | return NS_OK; |
129 | 0 | } |
130 | | |
131 | | nsresult |
132 | | TableUpdateV2::NewAddComplete(uint32_t aAddChunk, const Completion& aHash) |
133 | 0 | { |
134 | 0 | AddComplete *add = mAddCompletes.AppendElement(fallible); |
135 | 0 | if (!add) return NS_ERROR_OUT_OF_MEMORY; |
136 | 0 | add->addChunk = aAddChunk; |
137 | 0 | add->complete = aHash; |
138 | 0 | return NS_OK; |
139 | 0 | } |
140 | | |
141 | | nsresult |
142 | | TableUpdateV2::NewSubComplete(uint32_t aAddChunk, const Completion& aHash, uint32_t aSubChunk) |
143 | 0 | { |
144 | 0 | SubComplete *sub = mSubCompletes.AppendElement(fallible); |
145 | 0 | if (!sub) return NS_ERROR_OUT_OF_MEMORY; |
146 | 0 | sub->addChunk = aAddChunk; |
147 | 0 | sub->complete = aHash; |
148 | 0 | sub->subChunk = aSubChunk; |
149 | 0 | return NS_OK; |
150 | 0 | } |
151 | | |
152 | | nsresult |
153 | | TableUpdateV2::NewMissPrefix(const Prefix& aPrefix) |
154 | 0 | { |
155 | 0 | Prefix *prefix = mMissPrefixes.AppendElement(aPrefix, fallible); |
156 | 0 | if (!prefix) return NS_ERROR_OUT_OF_MEMORY; |
157 | 0 | return NS_OK; |
158 | 0 | } |
159 | | |
160 | | void |
161 | | TableUpdateV4::NewPrefixes(int32_t aSize, const nsACString& aPrefixes) |
162 | 0 | { |
163 | 0 | NS_ENSURE_TRUE_VOID(aSize >= 4 && aSize <= COMPLETE_SIZE); |
164 | 0 | NS_ENSURE_TRUE_VOID(aPrefixes.Length() % aSize == 0); |
165 | 0 | NS_ENSURE_TRUE_VOID(!mPrefixesMap.Get(aSize)); |
166 | 0 |
|
167 | 0 | int numOfPrefixes = aPrefixes.Length() / aSize; |
168 | 0 |
|
169 | 0 | if (aSize > 4) { |
170 | 0 | // TODO Bug 1364043 we may have a better API to record multiple samples into |
171 | 0 | // histograms with one call |
172 | 0 | #ifdef NIGHTLY_BUILD |
173 | 0 | for (int i = 0; i < std::min(20, numOfPrefixes); i++) { |
174 | 0 | Telemetry::Accumulate(Telemetry::URLCLASSIFIER_VLPS_LONG_PREFIXES, aSize); |
175 | 0 | } |
176 | 0 | #endif |
177 | 0 | } else if (LOG_ENABLED()) { |
178 | 0 | const uint32_t* p = |
179 | 0 | reinterpret_cast<const uint32_t*>(ToNewCString(aPrefixes)); |
180 | 0 |
|
181 | 0 | // Dump the first/last 10 fixed-length prefixes for debugging. |
182 | 0 | LOG(("* The first 10 (maximum) fixed-length prefixes: ")); |
183 | 0 | for (int i = 0; i < std::min(10, numOfPrefixes); i++) { |
184 | 0 | const uint8_t* c = reinterpret_cast<const uint8_t*>(&p[i]); |
185 | 0 | LOG(("%.2X%.2X%.2X%.2X", c[0], c[1], c[2], c[3])); |
186 | 0 | } |
187 | 0 |
|
188 | 0 | LOG(("* The last 10 (maximum) fixed-length prefixes: ")); |
189 | 0 | for (int i = std::max(0, numOfPrefixes - 10); i < numOfPrefixes; i++) { |
190 | 0 | const uint8_t* c = reinterpret_cast<const uint8_t*>(&p[i]); |
191 | 0 | LOG(("%.2X%.2X%.2X%.2X", c[0], c[1], c[2], c[3])); |
192 | 0 | } |
193 | 0 |
|
194 | 0 | LOG(("---- %u fixed-length prefixes in total.", aPrefixes.Length() / aSize)); |
195 | 0 | } |
196 | 0 |
|
197 | 0 | mPrefixesMap.Put(aSize, new nsCString(aPrefixes)); |
198 | 0 | } |
199 | | |
200 | | nsresult |
201 | | TableUpdateV4::NewRemovalIndices(const uint32_t* aIndices, size_t aNumOfIndices) |
202 | 0 | { |
203 | 0 | MOZ_ASSERT(mRemovalIndiceArray.IsEmpty(), "mRemovalIndiceArray must be empty"); |
204 | 0 |
|
205 | 0 | if (!mRemovalIndiceArray.SetCapacity(aNumOfIndices, fallible)) { |
206 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
207 | 0 | } |
208 | 0 | |
209 | 0 | for (size_t i = 0; i < aNumOfIndices; i++) { |
210 | 0 | mRemovalIndiceArray.AppendElement(aIndices[i]); |
211 | 0 | } |
212 | 0 | return NS_OK; |
213 | 0 | } |
214 | | |
215 | | void |
216 | | TableUpdateV4::NewChecksum(const std::string& aChecksum) |
217 | 0 | { |
218 | 0 | mChecksum.Assign(aChecksum.data(), aChecksum.size()); |
219 | 0 | } |
220 | | |
221 | | nsresult |
222 | | TableUpdateV4::NewFullHashResponse(const Prefix& aPrefix, |
223 | | const CachedFullHashResponse& aResponse) |
224 | 0 | { |
225 | 0 | CachedFullHashResponse* response = |
226 | 0 | mFullHashResponseMap.LookupOrAdd(aPrefix.ToUint32()); |
227 | 0 | if (!response) { |
228 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
229 | 0 | } |
230 | 0 | *response = aResponse; |
231 | 0 | return NS_OK; |
232 | 0 | } |
233 | | |
234 | | HashStore::HashStore(const nsACString& aTableName, |
235 | | const nsACString& aProvider, |
236 | | nsIFile* aRootStoreDir) |
237 | | : mTableName(aTableName) |
238 | | , mInUpdate(false) |
239 | | , mFileSize(0) |
240 | 0 | { |
241 | 0 | nsresult rv = Classifier::GetPrivateStoreDirectory(aRootStoreDir, |
242 | 0 | aTableName, |
243 | 0 | aProvider, |
244 | 0 | getter_AddRefs(mStoreDirectory)); |
245 | 0 | if (NS_FAILED(rv)) { |
246 | 0 | LOG(("Failed to get private store directory for %s", mTableName.get())); |
247 | 0 | mStoreDirectory = aRootStoreDir; |
248 | 0 | } |
249 | 0 | } |
250 | | |
251 | 0 | HashStore::~HashStore() |
252 | | = default; |
253 | | |
254 | | nsresult |
255 | | HashStore::Reset() |
256 | 0 | { |
257 | 0 | LOG(("HashStore resetting")); |
258 | 0 |
|
259 | 0 | nsCOMPtr<nsIFile> storeFile; |
260 | 0 | nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile)); |
261 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
262 | 0 |
|
263 | 0 | rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(STORE_SUFFIX)); |
264 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
265 | 0 |
|
266 | 0 | rv = storeFile->Remove(false); |
267 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
268 | 0 |
|
269 | 0 | mFileSize = 0; |
270 | 0 |
|
271 | 0 | return NS_OK; |
272 | 0 | } |
273 | | |
274 | | nsresult |
275 | | HashStore::CheckChecksum(uint32_t aFileSize) |
276 | 0 | { |
277 | 0 | if (!mInputStream) { |
278 | 0 | return NS_OK; |
279 | 0 | } |
280 | 0 | |
281 | 0 | // Check for file corruption by |
282 | 0 | // comparing the stored checksum to the actual checksum of the data |
283 | 0 | nsAutoCString hash; |
284 | 0 | nsAutoCString compareHash; |
285 | 0 | uint32_t read; |
286 | 0 |
|
287 | 0 | nsresult rv = CalculateChecksum(hash, aFileSize, true); |
288 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
289 | 0 |
|
290 | 0 | compareHash.SetLength(hash.Length()); |
291 | 0 |
|
292 | 0 | if (hash.Length() > aFileSize) { |
293 | 0 | NS_WARNING("SafeBrowsing file not long enough to store its hash"); |
294 | 0 | return NS_ERROR_FAILURE; |
295 | 0 | } |
296 | 0 | nsCOMPtr<nsISeekableStream> seekIn = do_QueryInterface(mInputStream); |
297 | 0 | rv = seekIn->Seek(nsISeekableStream::NS_SEEK_SET, aFileSize - hash.Length()); |
298 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
299 | 0 |
|
300 | 0 | rv = mInputStream->Read(compareHash.BeginWriting(), hash.Length(), &read); |
301 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
302 | 0 | NS_ASSERTION(read == hash.Length(), "Could not read hash bytes"); |
303 | 0 |
|
304 | 0 | if (!hash.Equals(compareHash)) { |
305 | 0 | NS_WARNING("SafeBrowsing file failed checksum."); |
306 | 0 | return NS_ERROR_FAILURE; |
307 | 0 | } |
308 | 0 |
|
309 | 0 | return NS_OK; |
310 | 0 | } |
311 | | |
312 | | nsresult |
313 | | HashStore::Open() |
314 | 0 | { |
315 | 0 | nsCOMPtr<nsIFile> storeFile; |
316 | 0 | nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile)); |
317 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
318 | 0 |
|
319 | 0 | rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(".sbstore")); |
320 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
321 | 0 |
|
322 | 0 | nsCOMPtr<nsIInputStream> origStream; |
323 | 0 | rv = NS_NewLocalFileInputStream(getter_AddRefs(origStream), storeFile, |
324 | 0 | PR_RDONLY | nsIFile::OS_READAHEAD); |
325 | 0 |
|
326 | 0 | if (rv == NS_ERROR_FILE_NOT_FOUND) { |
327 | 0 | UpdateHeader(); |
328 | 0 | return NS_OK; |
329 | 0 | } |
330 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
331 | 0 |
|
332 | 0 | int64_t fileSize; |
333 | 0 | rv = storeFile->GetFileSize(&fileSize); |
334 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
335 | 0 |
|
336 | 0 | if (fileSize < 0 || fileSize > UINT32_MAX) { |
337 | 0 | return NS_ERROR_FAILURE; |
338 | 0 | } |
339 | 0 | |
340 | 0 | mFileSize = static_cast<uint32_t>(fileSize); |
341 | 0 | rv = NS_NewBufferedInputStream(getter_AddRefs(mInputStream), |
342 | 0 | origStream.forget(), mFileSize); |
343 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
344 | 0 |
|
345 | 0 | rv = ReadHeader(); |
346 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
347 | 0 |
|
348 | 0 | rv = SanityCheck(); |
349 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
350 | 0 |
|
351 | 0 | return NS_OK; |
352 | 0 | } |
353 | | |
354 | | nsresult |
355 | | HashStore::ReadHeader() |
356 | 0 | { |
357 | 0 | if (!mInputStream) { |
358 | 0 | UpdateHeader(); |
359 | 0 | return NS_OK; |
360 | 0 | } |
361 | 0 | |
362 | 0 | nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream); |
363 | 0 | nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, 0); |
364 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
365 | 0 |
|
366 | 0 | void *buffer = &mHeader; |
367 | 0 | rv = NS_ReadInputStreamToBuffer(mInputStream, |
368 | 0 | &buffer, |
369 | 0 | sizeof(Header)); |
370 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
371 | 0 |
|
372 | 0 | return NS_OK; |
373 | 0 | } |
374 | | |
375 | | nsresult |
376 | | HashStore::SanityCheck() const |
377 | 0 | { |
378 | 0 | if (mHeader.magic != STORE_MAGIC || mHeader.version != CURRENT_VERSION) { |
379 | 0 | NS_WARNING("Unexpected header data in the store."); |
380 | 0 | return NS_ERROR_FAILURE; |
381 | 0 | } |
382 | 0 |
|
383 | 0 | return NS_OK; |
384 | 0 | } |
385 | | |
386 | | nsresult |
387 | | HashStore::CalculateChecksum(nsAutoCString& aChecksum, |
388 | | uint32_t aFileSize, |
389 | | bool aChecksumPresent) |
390 | 0 | { |
391 | 0 | aChecksum.Truncate(); |
392 | 0 |
|
393 | 0 | // Reset mInputStream to start |
394 | 0 | nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream); |
395 | 0 | nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, 0); |
396 | 0 |
|
397 | 0 | nsCOMPtr<nsICryptoHash> hash = do_CreateInstance(NS_CRYPTO_HASH_CONTRACTID, &rv); |
398 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
399 | 0 |
|
400 | 0 | // Size of MD5 hash in bytes |
401 | 0 | const uint32_t CHECKSUM_SIZE = 16; |
402 | 0 |
|
403 | 0 | // MD5 is not a secure hash function, but since this is a filesystem integrity |
404 | 0 | // check, this usage is ok. |
405 | 0 | rv = hash->Init(nsICryptoHash::MD5); |
406 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
407 | 0 |
|
408 | 0 | if (!aChecksumPresent) { |
409 | 0 | // Hash entire file |
410 | 0 | rv = hash->UpdateFromStream(mInputStream, UINT32_MAX); |
411 | 0 | } else { |
412 | 0 | // Hash everything but last checksum bytes |
413 | 0 | if (aFileSize < CHECKSUM_SIZE) { |
414 | 0 | NS_WARNING("SafeBrowsing file isn't long enough to store its checksum"); |
415 | 0 | return NS_ERROR_FAILURE; |
416 | 0 | } |
417 | 0 | rv = hash->UpdateFromStream(mInputStream, aFileSize - CHECKSUM_SIZE); |
418 | 0 | } |
419 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
420 | 0 |
|
421 | 0 | rv = hash->Finish(false, aChecksum); |
422 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
423 | 0 |
|
424 | 0 | return NS_OK; |
425 | 0 | } |
426 | | |
427 | | void |
428 | | HashStore::UpdateHeader() |
429 | 0 | { |
430 | 0 | mHeader.magic = STORE_MAGIC; |
431 | 0 | mHeader.version = CURRENT_VERSION; |
432 | 0 |
|
433 | 0 | mHeader.numAddChunks = mAddChunks.Length(); |
434 | 0 | mHeader.numSubChunks = mSubChunks.Length(); |
435 | 0 | mHeader.numAddPrefixes = mAddPrefixes.Length(); |
436 | 0 | mHeader.numSubPrefixes = mSubPrefixes.Length(); |
437 | 0 | mHeader.numAddCompletes = mAddCompletes.Length(); |
438 | 0 | mHeader.numSubCompletes = mSubCompletes.Length(); |
439 | 0 | } |
440 | | |
441 | | nsresult |
442 | | HashStore::ReadChunkNumbers() |
443 | 0 | { |
444 | 0 | if (!mInputStream || AlreadyReadChunkNumbers()) { |
445 | 0 | return NS_OK; |
446 | 0 | } |
447 | 0 | |
448 | 0 | nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream); |
449 | 0 | nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, |
450 | 0 | sizeof(Header)); |
451 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
452 | 0 |
|
453 | 0 | rv = mAddChunks.Read(mInputStream, mHeader.numAddChunks); |
454 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
455 | 0 | NS_ASSERTION(mAddChunks.Length() == mHeader.numAddChunks, "Read the right amount of add chunks."); |
456 | 0 |
|
457 | 0 | rv = mSubChunks.Read(mInputStream, mHeader.numSubChunks); |
458 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
459 | 0 | NS_ASSERTION(mSubChunks.Length() == mHeader.numSubChunks, "Read the right amount of sub chunks."); |
460 | 0 |
|
461 | 0 | return NS_OK; |
462 | 0 | } |
463 | | |
464 | | nsresult |
465 | | HashStore::ReadHashes() |
466 | 0 | { |
467 | 0 | if (!mInputStream) { |
468 | 0 | // BeginUpdate has been called but Open hasn't initialized mInputStream, |
469 | 0 | // because the existing HashStore is empty. |
470 | 0 | return NS_OK; |
471 | 0 | } |
472 | 0 | |
473 | 0 | nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream); |
474 | 0 |
|
475 | 0 | uint32_t offset = sizeof(Header); |
476 | 0 | offset += (mHeader.numAddChunks + mHeader.numSubChunks) * sizeof(uint32_t); |
477 | 0 | nsresult rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, offset); |
478 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
479 | 0 |
|
480 | 0 | rv = ReadAddPrefixes(); |
481 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
482 | 0 |
|
483 | 0 | rv = ReadSubPrefixes(); |
484 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
485 | 0 |
|
486 | 0 | // If the completions were read before, then we are done here. |
487 | 0 | if (AlreadyReadCompletions()) { |
488 | 0 | return NS_OK; |
489 | 0 | } |
490 | 0 | |
491 | 0 | rv = ReadTArray(mInputStream, &mAddCompletes, mHeader.numAddCompletes); |
492 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
493 | 0 |
|
494 | 0 | rv = ReadTArray(mInputStream, &mSubCompletes, mHeader.numSubCompletes); |
495 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
496 | 0 |
|
497 | 0 | return NS_OK; |
498 | 0 | } |
499 | | |
500 | | |
501 | | nsresult |
502 | | HashStore::ReadCompletions() |
503 | 0 | { |
504 | 0 | if (!mInputStream || AlreadyReadCompletions()) { |
505 | 0 | return NS_OK; |
506 | 0 | } |
507 | 0 | |
508 | 0 | nsCOMPtr<nsIFile> storeFile; |
509 | 0 | nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile)); |
510 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
511 | 0 |
|
512 | 0 | rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(STORE_SUFFIX)); |
513 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
514 | 0 |
|
515 | 0 | uint32_t offset = mFileSize - |
516 | 0 | sizeof(struct AddComplete) * mHeader.numAddCompletes - |
517 | 0 | sizeof(struct SubComplete) * mHeader.numSubCompletes - |
518 | 0 | nsCheckSummedOutputStream::CHECKSUM_SIZE; |
519 | 0 |
|
520 | 0 | nsCOMPtr<nsISeekableStream> seekable = do_QueryInterface(mInputStream); |
521 | 0 |
|
522 | 0 | rv = seekable->Seek(nsISeekableStream::NS_SEEK_SET, offset); |
523 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
524 | 0 |
|
525 | 0 | rv = ReadTArray(mInputStream, &mAddCompletes, mHeader.numAddCompletes); |
526 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
527 | 0 |
|
528 | 0 | rv = ReadTArray(mInputStream, &mSubCompletes, mHeader.numSubCompletes); |
529 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
530 | 0 |
|
531 | 0 | return NS_OK; |
532 | 0 | } |
533 | | |
534 | | nsresult |
535 | | HashStore::PrepareForUpdate() |
536 | 0 | { |
537 | 0 | nsresult rv = CheckChecksum(mFileSize); |
538 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
539 | 0 |
|
540 | 0 | rv = ReadChunkNumbers(); |
541 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
542 | 0 |
|
543 | 0 | rv = ReadHashes(); |
544 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
545 | 0 |
|
546 | 0 | return NS_OK; |
547 | 0 | } |
548 | | |
549 | | nsresult |
550 | | HashStore::BeginUpdate() |
551 | 0 | { |
552 | 0 | // Check whether the file is corrupted and read the rest of the store |
553 | 0 | // in memory. |
554 | 0 | nsresult rv = PrepareForUpdate(); |
555 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
556 | 0 |
|
557 | 0 | // Close the input stream; it won't be needed any more because |
558 | 0 | // we are going to rewrite the store file. |
559 | 0 | if (mInputStream) { |
560 | 0 | rv = mInputStream->Close(); |
561 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
562 | 0 | } |
563 | 0 |
|
564 | 0 | mInUpdate = true; |
565 | 0 |
|
566 | 0 | return NS_OK; |
567 | 0 | } |
568 | | |
569 | | template<class T> |
570 | | static nsresult |
571 | | Merge(ChunkSet* aStoreChunks, |
572 | | FallibleTArray<T>* aStorePrefixes, |
573 | | const ChunkSet& aUpdateChunks, |
574 | | FallibleTArray<T>& aUpdatePrefixes, |
575 | | bool aAllowMerging = false) |
576 | 0 | { |
577 | 0 | EntrySort(aUpdatePrefixes); |
578 | 0 |
|
579 | 0 | auto storeIter = aStorePrefixes->begin(); |
580 | 0 | auto storeEnd = aStorePrefixes->end(); |
581 | 0 |
|
582 | 0 | // use a separate array so we can keep the iterators valid |
583 | 0 | // if the nsTArray grows |
584 | 0 | nsTArray<T> adds; |
585 | 0 |
|
586 | 0 | for (const auto& updatePrefix : aUpdatePrefixes) { |
587 | 0 | // skip this chunk if we already have it, unless we're |
588 | 0 | // merging completions, in which case we'll always already |
589 | 0 | // have the chunk from the original prefix |
590 | 0 | if (aStoreChunks->Has(updatePrefix.Chunk())) |
591 | 0 | if (!aAllowMerging) |
592 | 0 | continue; |
593 | 0 | // XXX: binary search for insertion point might be faster in common |
594 | 0 | // case? |
595 | 0 | while (storeIter < storeEnd && (storeIter->Compare(updatePrefix) < 0)) { |
596 | 0 | // skip forward to matching element (or not...) |
597 | 0 | storeIter++; |
598 | 0 | } |
599 | 0 | // no match, add |
600 | 0 | if (storeIter == storeEnd |
601 | 0 | || storeIter->Compare(updatePrefix) != 0) { |
602 | 0 | if (!adds.AppendElement(updatePrefix, fallible)) { |
603 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
604 | 0 | } |
605 | 0 | } |
606 | 0 | } |
607 | 0 |
|
608 | 0 | // Chunks can be empty, but we should still report that we have them |
609 | 0 | // to keep the chunk ranges continuous. |
610 | 0 | aStoreChunks->Merge(aUpdateChunks); |
611 | 0 |
|
612 | 0 | if (!aStorePrefixes->AppendElements(adds, fallible)) |
613 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
614 | 0 | |
615 | 0 | EntrySort(*aStorePrefixes); |
616 | 0 |
|
617 | 0 | return NS_OK; |
618 | 0 | } |
Unexecuted instantiation: HashStore.cpp:nsresult mozilla::safebrowsing::Merge<mozilla::safebrowsing::AddPrefix>(mozilla::safebrowsing::ChunkSet*, FallibleTArray<mozilla::safebrowsing::AddPrefix>*, mozilla::safebrowsing::ChunkSet const&, FallibleTArray<mozilla::safebrowsing::AddPrefix>&, bool)
Unexecuted instantiation: HashStore.cpp:nsresult mozilla::safebrowsing::Merge<mozilla::safebrowsing::AddComplete>(mozilla::safebrowsing::ChunkSet*, FallibleTArray<mozilla::safebrowsing::AddComplete>*, mozilla::safebrowsing::ChunkSet const&, FallibleTArray<mozilla::safebrowsing::AddComplete>&, bool)
Unexecuted instantiation: HashStore.cpp:nsresult mozilla::safebrowsing::Merge<mozilla::safebrowsing::SubPrefix>(mozilla::safebrowsing::ChunkSet*, FallibleTArray<mozilla::safebrowsing::SubPrefix>*, mozilla::safebrowsing::ChunkSet const&, FallibleTArray<mozilla::safebrowsing::SubPrefix>&, bool)
Unexecuted instantiation: HashStore.cpp:nsresult mozilla::safebrowsing::Merge<mozilla::safebrowsing::SubComplete>(mozilla::safebrowsing::ChunkSet*, FallibleTArray<mozilla::safebrowsing::SubComplete>*, mozilla::safebrowsing::ChunkSet const&, FallibleTArray<mozilla::safebrowsing::SubComplete>&, bool)
619 | | |
620 | | nsresult |
621 | | HashStore::ApplyUpdate(RefPtr<TableUpdateV2> aUpdate) |
622 | 0 | { |
623 | 0 | MOZ_ASSERT(mTableName.Equals(aUpdate->TableName())); |
624 | 0 |
|
625 | 0 | nsresult rv = mAddExpirations.Merge(aUpdate->AddExpirations()); |
626 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
627 | 0 |
|
628 | 0 | rv = mSubExpirations.Merge(aUpdate->SubExpirations()); |
629 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
630 | 0 |
|
631 | 0 | rv = Expire(); |
632 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
633 | 0 |
|
634 | 0 | rv = Merge(&mAddChunks, &mAddPrefixes, |
635 | 0 | aUpdate->AddChunks(), aUpdate->AddPrefixes()); |
636 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
637 | 0 |
|
638 | 0 | rv = Merge(&mAddChunks, &mAddCompletes, |
639 | 0 | aUpdate->AddChunks(), aUpdate->AddCompletes(), true); |
640 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
641 | 0 |
|
642 | 0 | rv = Merge(&mSubChunks, &mSubPrefixes, |
643 | 0 | aUpdate->SubChunks(), aUpdate->SubPrefixes()); |
644 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
645 | 0 |
|
646 | 0 | rv = Merge(&mSubChunks, &mSubCompletes, |
647 | 0 | aUpdate->SubChunks(), aUpdate->SubCompletes(), true); |
648 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
649 | 0 |
|
650 | 0 | return NS_OK; |
651 | 0 | } |
652 | | |
653 | | nsresult |
654 | | HashStore::Rebuild() |
655 | 0 | { |
656 | 0 | NS_ASSERTION(mInUpdate, "Must be in update to rebuild."); |
657 | 0 |
|
658 | 0 | nsresult rv = ProcessSubs(); |
659 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
660 | 0 |
|
661 | 0 | UpdateHeader(); |
662 | 0 |
|
663 | 0 | return NS_OK; |
664 | 0 | } |
665 | | |
666 | | void |
667 | | HashStore::ClearCompletes() |
668 | 0 | { |
669 | 0 | NS_ASSERTION(mInUpdate, "Must be in update to clear completes."); |
670 | 0 |
|
671 | 0 | mAddCompletes.Clear(); |
672 | 0 | mSubCompletes.Clear(); |
673 | 0 |
|
674 | 0 | UpdateHeader(); |
675 | 0 | } |
676 | | |
677 | | template<class T> |
678 | | static void |
679 | | ExpireEntries(FallibleTArray<T>* aEntries, ChunkSet& aExpirations) |
680 | 0 | { |
681 | 0 | auto addIter = aEntries->begin(); |
682 | 0 |
|
683 | 0 | for (const auto& entry : *aEntries) { |
684 | 0 | if (!aExpirations.Has(entry.Chunk())) { |
685 | 0 | *addIter = entry; |
686 | 0 | addIter++; |
687 | 0 | } |
688 | 0 | } |
689 | 0 |
|
690 | 0 | aEntries->TruncateLength(addIter - aEntries->begin()); |
691 | 0 | } |
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::ExpireEntries<mozilla::safebrowsing::AddPrefix>(FallibleTArray<mozilla::safebrowsing::AddPrefix>*, mozilla::safebrowsing::ChunkSet&)
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::ExpireEntries<mozilla::safebrowsing::AddComplete>(FallibleTArray<mozilla::safebrowsing::AddComplete>*, mozilla::safebrowsing::ChunkSet&)
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::ExpireEntries<mozilla::safebrowsing::SubPrefix>(FallibleTArray<mozilla::safebrowsing::SubPrefix>*, mozilla::safebrowsing::ChunkSet&)
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::ExpireEntries<mozilla::safebrowsing::SubComplete>(FallibleTArray<mozilla::safebrowsing::SubComplete>*, mozilla::safebrowsing::ChunkSet&)
692 | | |
693 | | nsresult |
694 | | HashStore::Expire() |
695 | 0 | { |
696 | 0 | ExpireEntries(&mAddPrefixes, mAddExpirations); |
697 | 0 | ExpireEntries(&mAddCompletes, mAddExpirations); |
698 | 0 | ExpireEntries(&mSubPrefixes, mSubExpirations); |
699 | 0 | ExpireEntries(&mSubCompletes, mSubExpirations); |
700 | 0 |
|
701 | 0 | mAddChunks.Remove(mAddExpirations); |
702 | 0 | mSubChunks.Remove(mSubExpirations); |
703 | 0 |
|
704 | 0 | mAddExpirations.Clear(); |
705 | 0 | mSubExpirations.Clear(); |
706 | 0 |
|
707 | 0 | return NS_OK; |
708 | 0 | } |
709 | | |
710 | | template<class T> |
711 | | nsresult DeflateWriteTArray(nsIOutputStream* aStream, nsTArray<T>& aIn) |
712 | 0 | { |
713 | 0 | uLongf insize = aIn.Length() * sizeof(T); |
714 | 0 | uLongf outsize = compressBound(insize); |
715 | 0 | FallibleTArray<char> outBuff; |
716 | 0 | if (!outBuff.SetLength(outsize, fallible)) { |
717 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
718 | 0 | } |
719 | 0 | |
720 | 0 | int zerr = compress(reinterpret_cast<Bytef*>(outBuff.Elements()), |
721 | 0 | &outsize, |
722 | 0 | reinterpret_cast<const Bytef*>(aIn.Elements()), |
723 | 0 | insize); |
724 | 0 | if (zerr != Z_OK) { |
725 | 0 | return NS_ERROR_FAILURE; |
726 | 0 | } |
727 | 0 | LOG(("DeflateWriteTArray: %lu in %lu out", insize, outsize)); |
728 | 0 |
|
729 | 0 | outBuff.TruncateLength(outsize); |
730 | 0 |
|
731 | 0 | // Length of compressed data stream |
732 | 0 | uint32_t dataLen = outBuff.Length(); |
733 | 0 | uint32_t written; |
734 | 0 | nsresult rv = aStream->Write(reinterpret_cast<char*>(&dataLen), sizeof(dataLen), &written); |
735 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
736 | 0 |
|
737 | 0 | NS_ASSERTION(written == sizeof(dataLen), "Error writing deflate length"); |
738 | 0 |
|
739 | 0 | // Store to stream |
740 | 0 | rv = WriteTArray(aStream, outBuff); |
741 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
742 | 0 |
|
743 | 0 | return NS_OK; |
744 | 0 | } |
745 | | |
746 | | template<class T> |
747 | | nsresult InflateReadTArray(nsIInputStream* aStream, FallibleTArray<T>* aOut, |
748 | | uint32_t aExpectedSize) |
749 | 0 | { |
750 | 0 |
|
751 | 0 | uint32_t inLen; |
752 | 0 | uint32_t read; |
753 | 0 | nsresult rv = aStream->Read(reinterpret_cast<char*>(&inLen), sizeof(inLen), &read); |
754 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
755 | 0 |
|
756 | 0 | NS_ASSERTION(read == sizeof(inLen), "Error reading inflate length"); |
757 | 0 |
|
758 | 0 | FallibleTArray<char> inBuff; |
759 | 0 | if (!inBuff.SetLength(inLen, fallible)) { |
760 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
761 | 0 | } |
762 | 0 | |
763 | 0 | rv = ReadTArray(aStream, &inBuff, inLen); |
764 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
765 | 0 |
|
766 | 0 | uLongf insize = inLen; |
767 | 0 | uLongf outsize = aExpectedSize * sizeof(T); |
768 | 0 | if (!aOut->SetLength(aExpectedSize, fallible)) { |
769 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
770 | 0 | } |
771 | 0 | |
772 | 0 | int zerr = uncompress(reinterpret_cast<Bytef*>(aOut->Elements()), |
773 | 0 | &outsize, |
774 | 0 | reinterpret_cast<const Bytef*>(inBuff.Elements()), |
775 | 0 | insize); |
776 | 0 | if (zerr != Z_OK) { |
777 | 0 | return NS_ERROR_FAILURE; |
778 | 0 | } |
779 | 0 | LOG(("InflateReadTArray: %lu in %lu out", insize, outsize)); |
780 | 0 |
|
781 | 0 | NS_ASSERTION(outsize == aExpectedSize * sizeof(T), "Decompression size mismatch"); |
782 | 0 |
|
783 | 0 | return NS_OK; |
784 | 0 | } |
785 | | |
786 | | static nsresult |
787 | | ByteSliceWrite(nsIOutputStream* aOut, nsTArray<uint32_t>& aData) |
788 | 0 | { |
789 | 0 | nsTArray<uint8_t> slice; |
790 | 0 | uint32_t count = aData.Length(); |
791 | 0 |
|
792 | 0 | // Only process one slice at a time to avoid using too much memory. |
793 | 0 | if (!slice.SetLength(count, fallible)) { |
794 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
795 | 0 | } |
796 | 0 | |
797 | 0 | // Process slice 1. |
798 | 0 | for (uint32_t i = 0; i < count; i++) { |
799 | 0 | slice[i] = (aData[i] >> 24); |
800 | 0 | } |
801 | 0 |
|
802 | 0 | nsresult rv = DeflateWriteTArray(aOut, slice); |
803 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
804 | 0 |
|
805 | 0 | // Process slice 2. |
806 | 0 | for (uint32_t i = 0; i < count; i++) { |
807 | 0 | slice[i] = ((aData[i] >> 16) & 0xFF); |
808 | 0 | } |
809 | 0 |
|
810 | 0 | rv = DeflateWriteTArray(aOut, slice); |
811 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
812 | 0 |
|
813 | 0 | // Process slice 3. |
814 | 0 | for (uint32_t i = 0; i < count; i++) { |
815 | 0 | slice[i] = ((aData[i] >> 8) & 0xFF); |
816 | 0 | } |
817 | 0 |
|
818 | 0 | rv = DeflateWriteTArray(aOut, slice); |
819 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
820 | 0 |
|
821 | 0 | // Process slice 4. |
822 | 0 | for (uint32_t i = 0; i < count; i++) { |
823 | 0 | slice[i] = (aData[i] & 0xFF); |
824 | 0 | } |
825 | 0 |
|
826 | 0 | // The LSB slice is generally uncompressible, don't bother |
827 | 0 | // compressing it. |
828 | 0 | rv = WriteTArray(aOut, slice); |
829 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
830 | 0 |
|
831 | 0 | return NS_OK; |
832 | 0 | } |
833 | | |
834 | | static nsresult |
835 | | ByteSliceRead(nsIInputStream* aInStream, FallibleTArray<uint32_t>* aData, uint32_t count) |
836 | 0 | { |
837 | 0 | FallibleTArray<uint8_t> slice1; |
838 | 0 | FallibleTArray<uint8_t> slice2; |
839 | 0 | FallibleTArray<uint8_t> slice3; |
840 | 0 | FallibleTArray<uint8_t> slice4; |
841 | 0 |
|
842 | 0 | nsresult rv = InflateReadTArray(aInStream, &slice1, count); |
843 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
844 | 0 |
|
845 | 0 | rv = InflateReadTArray(aInStream, &slice2, count); |
846 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
847 | 0 |
|
848 | 0 | rv = InflateReadTArray(aInStream, &slice3, count); |
849 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
850 | 0 |
|
851 | 0 | rv = ReadTArray(aInStream, &slice4, count); |
852 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
853 | 0 |
|
854 | 0 | if (!aData->SetCapacity(count, fallible)) { |
855 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
856 | 0 | } |
857 | 0 | |
858 | 0 | for (uint32_t i = 0; i < count; i++) { |
859 | 0 | aData->AppendElement((slice1[i] << 24) | |
860 | 0 | (slice2[i] << 16) | |
861 | 0 | (slice3[i] << 8) | |
862 | 0 | (slice4[i]), |
863 | 0 | fallible); |
864 | 0 | } |
865 | 0 |
|
866 | 0 | return NS_OK; |
867 | 0 | } |
868 | | |
869 | | nsresult |
870 | | HashStore::ReadAddPrefixes() |
871 | 0 | { |
872 | 0 | FallibleTArray<uint32_t> chunks; |
873 | 0 | uint32_t count = mHeader.numAddPrefixes; |
874 | 0 |
|
875 | 0 | nsresult rv = ByteSliceRead(mInputStream, &chunks, count); |
876 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
877 | 0 |
|
878 | 0 | if (!mAddPrefixes.SetCapacity(count, fallible)) { |
879 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
880 | 0 | } |
881 | 0 | for (uint32_t i = 0; i < count; i++) { |
882 | 0 | AddPrefix *add = mAddPrefixes.AppendElement(fallible); |
883 | 0 | add->prefix.FromUint32(0); |
884 | 0 | add->addChunk = chunks[i]; |
885 | 0 | } |
886 | 0 |
|
887 | 0 | return NS_OK; |
888 | 0 | } |
889 | | |
890 | | nsresult |
891 | | HashStore::ReadSubPrefixes() |
892 | 0 | { |
893 | 0 | FallibleTArray<uint32_t> addchunks; |
894 | 0 | FallibleTArray<uint32_t> subchunks; |
895 | 0 | FallibleTArray<uint32_t> prefixes; |
896 | 0 | uint32_t count = mHeader.numSubPrefixes; |
897 | 0 |
|
898 | 0 | nsresult rv = ByteSliceRead(mInputStream, &addchunks, count); |
899 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
900 | 0 |
|
901 | 0 | rv = ByteSliceRead(mInputStream, &subchunks, count); |
902 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
903 | 0 |
|
904 | 0 | rv = ByteSliceRead(mInputStream, &prefixes, count); |
905 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
906 | 0 |
|
907 | 0 | if (!mSubPrefixes.SetCapacity(count, fallible)) { |
908 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
909 | 0 | } |
910 | 0 | for (uint32_t i = 0; i < count; i++) { |
911 | 0 | SubPrefix *sub = mSubPrefixes.AppendElement(fallible); |
912 | 0 | sub->addChunk = addchunks[i]; |
913 | 0 | sub->prefix.FromUint32(prefixes[i]); |
914 | 0 | sub->subChunk = subchunks[i]; |
915 | 0 | } |
916 | 0 |
|
917 | 0 | return NS_OK; |
918 | 0 | } |
919 | | |
920 | | // Split up PrefixArray back into the constituents |
921 | | nsresult |
922 | | HashStore::WriteAddPrefixes(nsIOutputStream* aOut) |
923 | 0 | { |
924 | 0 | nsTArray<uint32_t> chunks; |
925 | 0 | uint32_t count = mAddPrefixes.Length(); |
926 | 0 | if (!chunks.SetCapacity(count, fallible)) { |
927 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
928 | 0 | } |
929 | 0 | |
930 | 0 | for (uint32_t i = 0; i < count; i++) { |
931 | 0 | chunks.AppendElement(mAddPrefixes[i].Chunk()); |
932 | 0 | } |
933 | 0 |
|
934 | 0 | nsresult rv = ByteSliceWrite(aOut, chunks); |
935 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
936 | 0 |
|
937 | 0 | return NS_OK; |
938 | 0 | } |
939 | | |
940 | | nsresult |
941 | | HashStore::WriteSubPrefixes(nsIOutputStream* aOut) |
942 | 0 | { |
943 | 0 | nsTArray<uint32_t> addchunks; |
944 | 0 | nsTArray<uint32_t> subchunks; |
945 | 0 | nsTArray<uint32_t> prefixes; |
946 | 0 | uint32_t count = mSubPrefixes.Length(); |
947 | 0 | if (!addchunks.SetCapacity(count, fallible) || |
948 | 0 | !subchunks.SetCapacity(count, fallible) || |
949 | 0 | !prefixes.SetCapacity(count, fallible)) { |
950 | 0 | return NS_ERROR_OUT_OF_MEMORY; |
951 | 0 | } |
952 | 0 | |
953 | 0 | for (uint32_t i = 0; i < count; i++) { |
954 | 0 | addchunks.AppendElement(mSubPrefixes[i].AddChunk()); |
955 | 0 | prefixes.AppendElement(mSubPrefixes[i].PrefixHash().ToUint32()); |
956 | 0 | subchunks.AppendElement(mSubPrefixes[i].Chunk()); |
957 | 0 | } |
958 | 0 |
|
959 | 0 | nsresult rv = ByteSliceWrite(aOut, addchunks); |
960 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
961 | 0 |
|
962 | 0 | rv = ByteSliceWrite(aOut, subchunks); |
963 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
964 | 0 |
|
965 | 0 | rv = ByteSliceWrite(aOut, prefixes); |
966 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
967 | 0 |
|
968 | 0 | return NS_OK; |
969 | 0 | } |
970 | | |
971 | | nsresult |
972 | | HashStore::WriteFile() |
973 | 0 | { |
974 | 0 | NS_ASSERTION(mInUpdate, "Must be in update to write database."); |
975 | 0 | if (nsUrlClassifierDBService::ShutdownHasStarted()) { |
976 | 0 | return NS_ERROR_ABORT; |
977 | 0 | } |
978 | 0 | |
979 | 0 | nsCOMPtr<nsIFile> storeFile; |
980 | 0 | nsresult rv = mStoreDirectory->Clone(getter_AddRefs(storeFile)); |
981 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
982 | 0 | rv = storeFile->AppendNative(mTableName + NS_LITERAL_CSTRING(".sbstore")); |
983 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
984 | 0 |
|
985 | 0 | nsCOMPtr<nsIOutputStream> out; |
986 | 0 | rv = NS_NewCheckSummedOutputStream(getter_AddRefs(out), storeFile); |
987 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
988 | 0 |
|
989 | 0 | uint32_t written; |
990 | 0 | rv = out->Write(reinterpret_cast<char*>(&mHeader), sizeof(mHeader), &written); |
991 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
992 | 0 |
|
993 | 0 | // Write chunk numbers. |
994 | 0 | rv = mAddChunks.Write(out); |
995 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
996 | 0 |
|
997 | 0 | rv = mSubChunks.Write(out); |
998 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
999 | 0 |
|
1000 | 0 | // Write hashes. |
1001 | 0 | rv = WriteAddPrefixes(out); |
1002 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1003 | 0 |
|
1004 | 0 | rv = WriteSubPrefixes(out); |
1005 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1006 | 0 |
|
1007 | 0 | rv = WriteTArray(out, mAddCompletes); |
1008 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1009 | 0 |
|
1010 | 0 | rv = WriteTArray(out, mSubCompletes); |
1011 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1012 | 0 |
|
1013 | 0 | nsCOMPtr<nsISafeOutputStream> safeOut = do_QueryInterface(out, &rv); |
1014 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1015 | 0 |
|
1016 | 0 | rv = safeOut->Finish(); |
1017 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1018 | 0 |
|
1019 | 0 | return NS_OK; |
1020 | 0 | } |
1021 | | |
1022 | | template <class T> |
1023 | | static void |
1024 | | Erase(FallibleTArray<T>* array, |
1025 | | typename nsTArray<T>::iterator& iterStart, |
1026 | | typename nsTArray<T>::iterator& iterEnd) |
1027 | 0 | { |
1028 | 0 | uint32_t start = iterStart - array->begin(); |
1029 | 0 | uint32_t count = iterEnd - iterStart; |
1030 | 0 |
|
1031 | 0 | if (count > 0) { |
1032 | 0 | array->RemoveElementsAt(start, count); |
1033 | 0 | } |
1034 | 0 | } |
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::Erase<mozilla::safebrowsing::AddComplete>(FallibleTArray<mozilla::safebrowsing::AddComplete>*, nsTArray<mozilla::safebrowsing::AddComplete>::iterator&, nsTArray<mozilla::safebrowsing::AddComplete>::iterator&)
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::Erase<mozilla::safebrowsing::SubComplete>(FallibleTArray<mozilla::safebrowsing::SubComplete>*, nsTArray<mozilla::safebrowsing::SubComplete>::iterator&, nsTArray<mozilla::safebrowsing::SubComplete>::iterator&)
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::Erase<mozilla::safebrowsing::AddPrefix>(FallibleTArray<mozilla::safebrowsing::AddPrefix>*, nsTArray<mozilla::safebrowsing::AddPrefix>::iterator&, nsTArray<mozilla::safebrowsing::AddPrefix>::iterator&)
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::Erase<mozilla::safebrowsing::SubPrefix>(FallibleTArray<mozilla::safebrowsing::SubPrefix>*, nsTArray<mozilla::safebrowsing::SubPrefix>::iterator&, nsTArray<mozilla::safebrowsing::SubPrefix>::iterator&)
1035 | | |
1036 | | // Find items matching between |subs| and |adds|, and remove them, |
1037 | | // recording the item from |adds| in |adds_removed|. To minimize |
1038 | | // copies, the inputs are processed in parallel, so |subs| and |adds| |
1039 | | // should be compatibly ordered (either by SBAddPrefixLess or |
1040 | | // SBAddPrefixHashLess). |
1041 | | // |
1042 | | // |predAS| provides add < sub, |predSA| provides sub < add, for the |
1043 | | // tightest compare appropriate (see calls in SBProcessSubs). |
1044 | | template<class TSub, class TAdd> |
1045 | | static void |
1046 | | KnockoutSubs(FallibleTArray<TSub>* aSubs, FallibleTArray<TAdd>* aAdds) |
1047 | 0 | { |
1048 | 0 | // Keep a pair of output iterators for writing kept items. Due to |
1049 | 0 | // deletions, these may lag the main iterators. Using erase() on |
1050 | 0 | // individual items would result in O(N^2) copies. Using a list |
1051 | 0 | // would work around that, at double or triple the memory cost. |
1052 | 0 | auto addOut = aAdds->begin(); |
1053 | 0 | auto addIter = aAdds->begin(); |
1054 | 0 |
|
1055 | 0 | auto subOut = aSubs->begin(); |
1056 | 0 | auto subIter = aSubs->begin(); |
1057 | 0 |
|
1058 | 0 | auto addEnd = aAdds->end(); |
1059 | 0 | auto subEnd = aSubs->end(); |
1060 | 0 |
|
1061 | 0 | while (addIter != addEnd && subIter != subEnd) { |
1062 | 0 | // additer compare, so it compares on add chunk |
1063 | 0 | int32_t cmp = addIter->Compare(*subIter); |
1064 | 0 | if (cmp > 0) { |
1065 | 0 | // If |*sub_iter| < |*add_iter|, retain the sub. |
1066 | 0 | *subOut = *subIter; |
1067 | 0 | ++subOut; |
1068 | 0 | ++subIter; |
1069 | 0 | } else if (cmp < 0) { |
1070 | 0 | // If |*add_iter| < |*sub_iter|, retain the add. |
1071 | 0 | *addOut = *addIter; |
1072 | 0 | ++addOut; |
1073 | 0 | ++addIter; |
1074 | 0 | } else { |
1075 | 0 | // Drop equal items |
1076 | 0 | ++addIter; |
1077 | 0 | ++subIter; |
1078 | 0 | } |
1079 | 0 | } |
1080 | 0 |
|
1081 | 0 | Erase(aAdds, addOut, addIter); |
1082 | 0 | Erase(aSubs, subOut, subIter); |
1083 | 0 | } |
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::KnockoutSubs<mozilla::safebrowsing::SubPrefix, mozilla::safebrowsing::AddPrefix>(FallibleTArray<mozilla::safebrowsing::SubPrefix>*, FallibleTArray<mozilla::safebrowsing::AddPrefix>*)
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::KnockoutSubs<mozilla::safebrowsing::SubComplete, mozilla::safebrowsing::AddComplete>(FallibleTArray<mozilla::safebrowsing::SubComplete>*, FallibleTArray<mozilla::safebrowsing::AddComplete>*)
1084 | | |
1085 | | // Remove items in |removes| from |fullHashes|. |fullHashes| and |
1086 | | // |removes| should be ordered by SBAddPrefix component. |
1087 | | template <class T> |
1088 | | static void |
1089 | | RemoveMatchingPrefixes(const SubPrefixArray& aSubs, FallibleTArray<T>* aFullHashes) |
1090 | 0 | { |
1091 | 0 | // Where to store kept items. |
1092 | 0 | auto out = aFullHashes->begin(); |
1093 | 0 | auto hashIter = aFullHashes->begin(); |
1094 | 0 | auto hashEnd = aFullHashes->end(); |
1095 | 0 |
|
1096 | 0 | auto removeIter = aSubs.begin(); |
1097 | 0 | auto removeEnd = aSubs.end(); |
1098 | 0 |
|
1099 | 0 | while (hashIter != hashEnd && removeIter != removeEnd) { |
1100 | 0 | int32_t cmp = removeIter->CompareAlt(*hashIter); |
1101 | 0 | if (cmp > 0) { |
1102 | 0 | // Keep items less than |*removeIter|. |
1103 | 0 | *out = *hashIter; |
1104 | 0 | ++out; |
1105 | 0 | ++hashIter; |
1106 | 0 | } else if (cmp < 0) { |
1107 | 0 | // No hit for |*removeIter|, bump it forward. |
1108 | 0 | ++removeIter; |
1109 | 0 | } else { |
1110 | 0 | // Drop equal items, there may be multiple hits. |
1111 | 0 | do { |
1112 | 0 | ++hashIter; |
1113 | 0 | } while (hashIter != hashEnd && |
1114 | 0 | !(removeIter->CompareAlt(*hashIter) < 0)); |
1115 | 0 | ++removeIter; |
1116 | 0 | } |
1117 | 0 | } |
1118 | 0 | Erase(aFullHashes, out, hashIter); |
1119 | 0 | } |
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::RemoveMatchingPrefixes<mozilla::safebrowsing::AddComplete>(FallibleTArray<mozilla::safebrowsing::SubPrefix> const&, FallibleTArray<mozilla::safebrowsing::AddComplete>*)
Unexecuted instantiation: HashStore.cpp:void mozilla::safebrowsing::RemoveMatchingPrefixes<mozilla::safebrowsing::SubComplete>(FallibleTArray<mozilla::safebrowsing::SubPrefix> const&, FallibleTArray<mozilla::safebrowsing::SubComplete>*)
1120 | | |
1121 | | static void |
1122 | | RemoveDeadSubPrefixes(SubPrefixArray& aSubs, ChunkSet& aAddChunks) |
1123 | 0 | { |
1124 | 0 | auto subIter = aSubs.begin(); |
1125 | 0 |
|
1126 | 0 | for (const auto& sub : aSubs) { |
1127 | 0 | bool hasChunk = aAddChunks.Has(sub.AddChunk()); |
1128 | 0 | // Keep the subprefix if the chunk it refers to is one |
1129 | 0 | // we haven't seen yet. |
1130 | 0 | if (!hasChunk) { |
1131 | 0 | *subIter = sub; |
1132 | 0 | subIter++; |
1133 | 0 | } |
1134 | 0 | } |
1135 | 0 |
|
1136 | 0 | LOG(("Removed %" PRId64 " dead SubPrefix entries.", |
1137 | 0 | static_cast<int64_t>(aSubs.end() - subIter))); |
1138 | 0 | aSubs.TruncateLength(subIter - aSubs.begin()); |
1139 | 0 | } |
1140 | | |
1141 | | #ifdef DEBUG |
1142 | | template <class T> |
1143 | | static void EnsureSorted(FallibleTArray<T>* aArray) |
1144 | | { |
1145 | | auto start = aArray->begin(); |
1146 | | auto end = aArray->end(); |
1147 | | auto iter = start; |
1148 | | auto previous = start; |
1149 | | |
1150 | | while (iter != end) { |
1151 | | previous = iter; |
1152 | | ++iter; |
1153 | | if (iter != end) { |
1154 | | MOZ_ASSERT(iter->Compare(*previous) >= 0); |
1155 | | } |
1156 | | } |
1157 | | |
1158 | | return; |
1159 | | } |
1160 | | #endif |
1161 | | |
1162 | | nsresult |
1163 | | HashStore::ProcessSubs() |
1164 | 0 | { |
1165 | | #ifdef DEBUG |
1166 | | EnsureSorted(&mAddPrefixes); |
1167 | | EnsureSorted(&mSubPrefixes); |
1168 | | EnsureSorted(&mAddCompletes); |
1169 | | EnsureSorted(&mSubCompletes); |
1170 | | LOG(("All databases seem to have a consistent sort order.")); |
1171 | | #endif |
1172 | |
|
1173 | 0 | RemoveMatchingPrefixes(mSubPrefixes, &mAddCompletes); |
1174 | 0 | RemoveMatchingPrefixes(mSubPrefixes, &mSubCompletes); |
1175 | 0 |
|
1176 | 0 | // Remove any remaining subbed prefixes from both addprefixes |
1177 | 0 | // and addcompletes. |
1178 | 0 | KnockoutSubs(&mSubPrefixes, &mAddPrefixes); |
1179 | 0 | KnockoutSubs(&mSubCompletes, &mAddCompletes); |
1180 | 0 |
|
1181 | 0 | // Remove any remaining subprefixes referring to addchunks that |
1182 | 0 | // we have (and hence have been processed above). |
1183 | 0 | RemoveDeadSubPrefixes(mSubPrefixes, mAddChunks); |
1184 | 0 |
|
1185 | | #ifdef DEBUG |
1186 | | EnsureSorted(&mAddPrefixes); |
1187 | | EnsureSorted(&mSubPrefixes); |
1188 | | EnsureSorted(&mAddCompletes); |
1189 | | EnsureSorted(&mSubCompletes); |
1190 | | LOG(("All databases seem to have a consistent sort order.")); |
1191 | | #endif |
1192 | |
|
1193 | 0 | return NS_OK; |
1194 | 0 | } |
1195 | | |
1196 | | nsresult |
1197 | | HashStore::AugmentAdds(const nsTArray<uint32_t>& aPrefixes) |
1198 | 0 | { |
1199 | 0 | uint32_t cnt = aPrefixes.Length(); |
1200 | 0 | if (cnt != mAddPrefixes.Length()) { |
1201 | 0 | LOG(("Amount of prefixes in cache not consistent with store (%zu vs %zu)", |
1202 | 0 | aPrefixes.Length(), mAddPrefixes.Length())); |
1203 | 0 | return NS_ERROR_FAILURE; |
1204 | 0 | } |
1205 | 0 | for (uint32_t i = 0; i < cnt; i++) { |
1206 | 0 | mAddPrefixes[i].prefix.FromUint32(aPrefixes[i]); |
1207 | 0 | } |
1208 | 0 | return NS_OK; |
1209 | 0 | } |
1210 | | |
1211 | | ChunkSet& |
1212 | | HashStore::AddChunks() |
1213 | 0 | { |
1214 | 0 | ReadChunkNumbers(); |
1215 | 0 |
|
1216 | 0 | return mAddChunks; |
1217 | 0 | } |
1218 | | |
1219 | | ChunkSet& |
1220 | | HashStore::SubChunks() |
1221 | 0 | { |
1222 | 0 | ReadChunkNumbers(); |
1223 | 0 |
|
1224 | 0 | return mSubChunks; |
1225 | 0 | } |
1226 | | |
1227 | | AddCompleteArray& |
1228 | | HashStore::AddCompletes() |
1229 | 0 | { |
1230 | 0 | ReadCompletions(); |
1231 | 0 |
|
1232 | 0 | return mAddCompletes; |
1233 | 0 | } |
1234 | | |
1235 | | SubCompleteArray& |
1236 | | HashStore::SubCompletes() |
1237 | 0 | { |
1238 | 0 | ReadCompletions(); |
1239 | 0 |
|
1240 | 0 | return mSubCompletes; |
1241 | 0 | } |
1242 | | |
1243 | | bool |
1244 | | HashStore::AlreadyReadChunkNumbers() const |
1245 | 0 | { |
1246 | 0 | // If there are chunks but the chunk set doesn't contain any data yet, |
1247 | 0 | // then we haven't read the chunk numbers. |
1248 | 0 | if ((mHeader.numAddChunks != 0 && mAddChunks.Length() == 0) || |
1249 | 0 | (mHeader.numSubChunks != 0 && mSubChunks.Length() == 0)) { |
1250 | 0 | return false; |
1251 | 0 | } |
1252 | 0 | return true; |
1253 | 0 | } |
1254 | | |
1255 | | bool |
1256 | | HashStore::AlreadyReadCompletions() const |
1257 | 0 | { |
1258 | 0 | // If there are completions but the completion set doesn't contain any data yet, |
1259 | 0 | // then we haven't read the completions. |
1260 | 0 | if ((mHeader.numAddCompletes != 0 && mAddCompletes.Length() == 0) || |
1261 | 0 | (mHeader.numSubCompletes != 0 && mSubCompletes.Length() == 0)) { |
1262 | 0 | return false; |
1263 | 0 | } |
1264 | 0 | return true; |
1265 | 0 | } |
1266 | | |
1267 | | } // namespace safebrowsing |
1268 | | } // namespace mozilla |