/src/mozilla-central/toolkit/components/url-classifier/Classifier.cpp
Line | Count | Source |
1 | | /* -*- Mode: C++; tab-width: 8; indent-tabs-mode: nil; c-basic-offset: 2 -*- */ |
2 | | /* This Source Code Form is subject to the terms of the Mozilla Public |
3 | | * License, v. 2.0. If a copy of the MPL was not distributed with this |
4 | | * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ |
5 | | |
6 | | #include "Classifier.h" |
7 | | #include "LookupCacheV4.h" |
8 | | #include "nsIPrefBranch.h" |
9 | | #include "nsIPrefService.h" |
10 | | #include "nsISimpleEnumerator.h" |
11 | | #include "nsIRandomGenerator.h" |
12 | | #include "nsIInputStream.h" |
13 | | #include "nsISeekableStream.h" |
14 | | #include "nsIFile.h" |
15 | | #include "nsNetCID.h" |
16 | | #include "nsPrintfCString.h" |
17 | | #include "nsThreadUtils.h" |
18 | | #include "mozilla/EndianUtils.h" |
19 | | #include "mozilla/Telemetry.h" |
20 | | #include "mozilla/IntegerPrintfMacros.h" |
21 | | #include "mozilla/Logging.h" |
22 | | #include "mozilla/SyncRunnable.h" |
23 | | #include "mozilla/Base64.h" |
24 | | #include "mozilla/Unused.h" |
25 | | #include "mozilla/UniquePtr.h" |
26 | | #include "nsIUrlClassifierUtils.h" |
27 | | #include "nsUrlClassifierDBService.h" |
28 | | |
29 | | // MOZ_LOG=UrlClassifierDbService:5 |
30 | | extern mozilla::LazyLogModule gUrlClassifierDbServiceLog; |
31 | 0 | #define LOG(args) MOZ_LOG(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug, args) |
32 | 0 | #define LOG_ENABLED() MOZ_LOG_TEST(gUrlClassifierDbServiceLog, mozilla::LogLevel::Debug) |
33 | | |
34 | 0 | #define STORE_DIRECTORY NS_LITERAL_CSTRING("safebrowsing") |
35 | 0 | #define TO_DELETE_DIR_SUFFIX NS_LITERAL_CSTRING("-to_delete") |
36 | 0 | #define BACKUP_DIR_SUFFIX NS_LITERAL_CSTRING("-backup") |
37 | 0 | #define UPDATING_DIR_SUFFIX NS_LITERAL_CSTRING("-updating") |
38 | | |
39 | 0 | #define METADATA_SUFFIX NS_LITERAL_CSTRING(".metadata") |
40 | | |
41 | | namespace mozilla { |
42 | | namespace safebrowsing { |
43 | | |
44 | | void |
45 | | Classifier::SplitTables(const nsACString& str, nsTArray<nsCString>& tables) |
46 | 0 | { |
47 | 0 | tables.Clear(); |
48 | 0 |
|
49 | 0 | nsACString::const_iterator begin, iter, end; |
50 | 0 | str.BeginReading(begin); |
51 | 0 | str.EndReading(end); |
52 | 0 | while (begin != end) { |
53 | 0 | iter = begin; |
54 | 0 | FindCharInReadable(',', iter, end); |
55 | 0 | nsDependentCSubstring table = Substring(begin,iter); |
56 | 0 | if (!table.IsEmpty()) { |
57 | 0 | tables.AppendElement(Substring(begin, iter)); |
58 | 0 | } |
59 | 0 | begin = iter; |
60 | 0 | if (begin != end) { |
61 | 0 | begin++; |
62 | 0 | } |
63 | 0 | } |
64 | 0 |
|
65 | 0 | // Remove duplicates |
66 | 0 | tables.Sort(); |
67 | 0 | const auto newEnd = std::unique(tables.begin(), tables.end()); |
68 | 0 | tables.TruncateLength(std::distance(tables.begin(), newEnd)); |
69 | 0 | } |
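
A minimal usage sketch of the splitting and de-duplication above (not part of the file's source); it assumes SplitTables() is the public static helper declared in Classifier.h, and the table names are made up:

    nsTArray<nsCString> tables;
    Classifier::SplitTables(
      NS_LITERAL_CSTRING("test-malware-simple,,goog-phish-proto,test-malware-simple"),
      tables);
    // |tables| now holds the sorted, de-duplicated entries:
    //   "goog-phish-proto", "test-malware-simple"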
70 | | |
71 | | nsresult |
72 | | Classifier::GetPrivateStoreDirectory(nsIFile* aRootStoreDirectory, |
73 | | const nsACString& aTableName, |
74 | | const nsACString& aProvider, |
75 | | nsIFile** aPrivateStoreDirectory) |
76 | 0 | { |
77 | 0 | NS_ENSURE_ARG_POINTER(aPrivateStoreDirectory); |
78 | 0 |
|
79 | 0 | if (!StringEndsWith(aTableName, NS_LITERAL_CSTRING("-proto"))) { |
80 | 0 | // Only V4 table names (ending with '-proto') are stored |
81 | 0 | // in a per-provider sub-directory. |
82 | 0 | nsCOMPtr<nsIFile>(aRootStoreDirectory).forget(aPrivateStoreDirectory); |
83 | 0 | return NS_OK; |
84 | 0 | } |
85 | 0 | |
86 | 0 | if (aProvider.IsEmpty()) { |
87 | 0 | // If we fail to get the provider, just store in the root folder. |
88 | 0 | nsCOMPtr<nsIFile>(aRootStoreDirectory).forget(aPrivateStoreDirectory); |
89 | 0 | return NS_OK; |
90 | 0 | } |
91 | 0 | |
92 | 0 | nsCOMPtr<nsIFile> providerDirectory; |
93 | 0 |
|
94 | 0 | // Clone first since we are going to create a new directory. |
95 | 0 | nsresult rv = aRootStoreDirectory->Clone(getter_AddRefs(providerDirectory)); |
96 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
97 | 0 |
|
98 | 0 | // Append the provider name to the root store directory. |
99 | 0 | rv = providerDirectory->AppendNative(aProvider); |
100 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
101 | 0 |
|
102 | 0 | // Ensure existence of the provider directory. |
103 | 0 | bool dirExists; |
104 | 0 | rv = providerDirectory->Exists(&dirExists); |
105 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
106 | 0 |
|
107 | 0 | if (!dirExists) { |
108 | 0 | LOG(("Creating private directory for %s", nsCString(aTableName).get())); |
109 | 0 | rv = providerDirectory->Create(nsIFile::DIRECTORY_TYPE, 0755); |
110 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
111 | 0 | providerDirectory.forget(aPrivateStoreDirectory); |
112 | 0 | return rv; |
113 | 0 | } |
114 | 0 | |
115 | 0 | // Store directory exists. Check if it's a directory. |
116 | 0 | bool isDir; |
117 | 0 | rv = providerDirectory->IsDirectory(&isDir); |
118 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
119 | 0 | if (!isDir) { |
120 | 0 | return NS_ERROR_FILE_DESTINATION_NOT_DIR; |
121 | 0 | } |
122 | 0 | |
123 | 0 | providerDirectory.forget(aPrivateStoreDirectory); |
124 | 0 |
|
125 | 0 | return NS_OK; |
126 | 0 | } |
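
Illustration of the resulting on-disk layout (the provider name "google4" and the table names are examples, not taken from this file): V4 "-proto" tables get a per-provider sub-directory, while everything else stays in the root store directory:

    <cacheDir>/safebrowsing/              V2 tables, e.g. test-malware-simple.*
    <cacheDir>/safebrowsing/google4/      V4 tables, e.g. goog-phish-proto.*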
127 | | |
128 | | Classifier::Classifier() |
129 | | : mIsTableRequestResultOutdated(true) |
130 | | , mUpdateInterrupted(true) |
131 | | , mIsClosed(false) |
132 | 0 | { |
133 | 0 | NS_NewNamedThread(NS_LITERAL_CSTRING("Classifier Update"), |
134 | 0 | getter_AddRefs(mUpdateThread)); |
135 | 0 | } |
136 | | |
137 | | Classifier::~Classifier() |
138 | 0 | { |
139 | 0 | Close(); |
140 | 0 | } |
141 | | |
142 | | nsresult |
143 | | Classifier::SetupPathNames() |
144 | 0 | { |
145 | 0 | // Get the root directory where all the databases are stored. |
146 | 0 | nsresult rv = mCacheDirectory->Clone(getter_AddRefs(mRootStoreDirectory)); |
147 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
148 | 0 |
|
149 | 0 | rv = mRootStoreDirectory->AppendNative(STORE_DIRECTORY); |
150 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
151 | 0 |
|
152 | 0 | // Make sure LookupCaches (which are persistent and survive updates) |
153 | 0 | // are reading/writing in the right place. We will be moving their |
154 | 0 | // files "underneath" them during backup/restore. |
155 | 0 | for (uint32_t i = 0; i < mLookupCaches.Length(); i++) { |
156 | 0 | mLookupCaches[i]->UpdateRootDirHandle(mRootStoreDirectory); |
157 | 0 | } |
158 | 0 |
|
159 | 0 | // Directory to which a backup is moved before an update. |
160 | 0 | rv = mCacheDirectory->Clone(getter_AddRefs(mBackupDirectory)); |
161 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
162 | 0 |
|
163 | 0 | rv = mBackupDirectory->AppendNative(STORE_DIRECTORY + BACKUP_DIR_SUFFIX); |
164 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
165 | 0 |
|
166 | 0 | // Directory in which the update work is done. |
167 | 0 | rv = mCacheDirectory->Clone(getter_AddRefs(mUpdatingDirectory)); |
168 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
169 | 0 |
|
170 | 0 | rv = mUpdatingDirectory->AppendNative(STORE_DIRECTORY + UPDATING_DIR_SUFFIX); |
171 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
172 | 0 |
|
173 | 0 | // Directory to which the backup is moved so we can atomically |
174 | 0 | // delete (really move) it. |
175 | 0 | rv = mCacheDirectory->Clone(getter_AddRefs(mToDeleteDirectory)); |
176 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
177 | 0 |
|
178 | 0 | rv = mToDeleteDirectory->AppendNative(STORE_DIRECTORY + TO_DELETE_DIR_SUFFIX); |
179 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
180 | 0 |
|
181 | 0 | return NS_OK; |
182 | 0 | } |
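
Given the STORE_DIRECTORY and *_SUFFIX macros defined at the top of the file, the four handles set up here point at sibling directories under the cache directory, roughly:

    <cacheDir>/safebrowsing              mRootStoreDirectory
    <cacheDir>/safebrowsing-backup       mBackupDirectory
    <cacheDir>/safebrowsing-updating     mUpdatingDirectory
    <cacheDir>/safebrowsing-to_delete    mToDeleteDirectory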
183 | | |
184 | | nsresult |
185 | | Classifier::CreateStoreDirectory() |
186 | 0 | { |
187 | 0 | if (mIsClosed) { |
188 | 0 | return NS_OK; // nothing to do, the classifier is done |
189 | 0 | } |
190 | 0 | |
191 | 0 | // Ensure the safebrowsing directory exists. |
192 | 0 | bool storeExists; |
193 | 0 | nsresult rv = mRootStoreDirectory->Exists(&storeExists); |
194 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
195 | 0 |
|
196 | 0 | if (!storeExists) { |
197 | 0 | rv = mRootStoreDirectory->Create(nsIFile::DIRECTORY_TYPE, 0755); |
198 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
199 | 0 | } else { |
200 | 0 | bool storeIsDir; |
201 | 0 | rv = mRootStoreDirectory->IsDirectory(&storeIsDir); |
202 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
203 | 0 | if (!storeIsDir) |
204 | 0 | return NS_ERROR_FILE_DESTINATION_NOT_DIR; |
205 | 0 | } |
206 | 0 | |
207 | 0 | return NS_OK; |
208 | 0 | } |
209 | | |
210 | | nsresult |
211 | | Classifier::Open(nsIFile& aCacheDirectory) |
212 | 0 | { |
213 | 0 | // Remember the Local profile directory. |
214 | 0 | nsresult rv = aCacheDirectory.Clone(getter_AddRefs(mCacheDirectory)); |
215 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
216 | 0 |
|
217 | 0 | // Create the handles to the update and backup directories. |
218 | 0 | rv = SetupPathNames(); |
219 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
220 | 0 |
|
221 | 0 | // Clean up any to-delete directories that haven't been deleted yet. |
222 | 0 | // This is still required for backward compatibility. |
223 | 0 | rv = CleanToDelete(); |
224 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
225 | 0 |
|
226 | 0 | // If we crashed during the previous update, the "safebrowsing-updating" |
227 | 0 | // directory will still exist, so remove it. |
228 | 0 | rv = mUpdatingDirectory->Remove(true); |
229 | 0 | if (NS_SUCCEEDED(rv)) { |
230 | 0 | // If the "safebrowsing-updating" directory exists, it implies a crash |
231 | 0 | // occurred during the previous update. |
232 | 0 | LOG(("We may have hit a crash in the previous update.")); |
233 | 0 | } |
234 | 0 |
|
235 | 0 | // Check whether we have an incomplete update and recover from the |
236 | 0 | // backup if so. |
237 | 0 | rv = RecoverBackups(); |
238 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
239 | 0 |
|
240 | 0 | // Make sure the main store directory exists. |
241 | 0 | rv = CreateStoreDirectory(); |
242 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
243 | 0 |
|
244 | 0 | // Build the list of known urlclassifier lists. |
245 | 0 | // XXX: Disk IO potentially on the main thread during startup |
246 | 0 | RegenActiveTables(); |
247 | 0 |
|
248 | 0 | return NS_OK; |
249 | 0 | } |
250 | | |
251 | | void |
252 | | Classifier::Close() |
253 | 0 | { |
254 | 0 | // Close() will be called by PreShutdown; set |mUpdateInterrupted| here |
255 | 0 | // to abort an ongoing update if possible. It is important to note that |
256 | 0 | // nothing added here should affect an ongoing update thread. |
257 | 0 | mUpdateInterrupted = true; |
258 | 0 | mIsClosed = true; |
259 | 0 | DropStores(); |
260 | 0 | } |
261 | | |
262 | | void |
263 | | Classifier::Reset() |
264 | 0 | { |
265 | 0 | MOZ_ASSERT(NS_GetCurrentThread() != mUpdateThread, |
266 | 0 | "Reset() MUST NOT be called on update thread"); |
267 | 0 |
|
268 | 0 | LOG(("Reset() is called so we interrupt the update.")); |
269 | 0 | mUpdateInterrupted = true; |
270 | 0 |
|
271 | 0 | RefPtr<Classifier> self = this; |
272 | 0 | auto resetFunc = [self] { |
273 | 0 | if (self->mIsClosed) { |
274 | 0 | return; // too late to reset, bail |
275 | 0 | } |
276 | 0 | self->DropStores(); |
277 | 0 |
|
278 | 0 | self->mRootStoreDirectory->Remove(true); |
279 | 0 | self->mBackupDirectory->Remove(true); |
280 | 0 | self->mUpdatingDirectory->Remove(true); |
281 | 0 | self->mToDeleteDirectory->Remove(true); |
282 | 0 |
|
283 | 0 | self->CreateStoreDirectory(); |
284 | 0 | self->RegenActiveTables(); |
285 | 0 | }; |
286 | 0 |
|
287 | 0 | if (!mUpdateThread) { |
288 | 0 | LOG(("Async update has been disabled. Just Reset() on worker thread.")); |
289 | 0 | resetFunc(); |
290 | 0 | return; |
291 | 0 | } |
292 | 0 |
|
293 | 0 | nsCOMPtr<nsIRunnable> r = |
294 | 0 | NS_NewRunnableFunction("safebrowsing::Classifier::Reset", resetFunc); |
295 | 0 | SyncRunnable::DispatchToThread(mUpdateThread, r); |
296 | 0 | } |
297 | | |
298 | | void |
299 | | Classifier::ResetTables(ClearType aType, const nsTArray<nsCString>& aTables) |
300 | 0 | { |
301 | 0 | for (uint32_t i = 0; i < aTables.Length(); i++) { |
302 | 0 | LOG(("Resetting table: %s", aTables[i].get())); |
303 | 0 | RefPtr<LookupCache> cache = GetLookupCache(aTables[i]); |
304 | 0 | if (cache) { |
305 | 0 | // Remove any cached Completes for this table if clear type is Clear_Cache |
306 | 0 | if (aType == Clear_Cache) { |
307 | 0 | cache->ClearCache(); |
308 | 0 | } else { |
309 | 0 | cache->ClearAll(); |
310 | 0 | } |
311 | 0 | } |
312 | 0 | } |
313 | 0 |
|
314 | 0 | // Clear on-disk database if clear type is Clear_All |
315 | 0 | if (aType == Clear_All) { |
316 | 0 | DeleteTables(mRootStoreDirectory, aTables); |
317 | 0 |
|
318 | 0 | RegenActiveTables(); |
319 | 0 | } |
320 | 0 | } |
321 | | |
322 | | void |
323 | | Classifier::DeleteTables(nsIFile* aDirectory, const nsTArray<nsCString>& aTables) |
324 | 0 | { |
325 | 0 | nsCOMPtr<nsIDirectoryEnumerator> entries; |
326 | 0 | nsresult rv = aDirectory->GetDirectoryEntries(getter_AddRefs(entries)); |
327 | 0 | NS_ENSURE_SUCCESS_VOID(rv); |
328 | 0 |
|
329 | 0 | nsCOMPtr<nsIFile> file; |
330 | 0 | while (NS_SUCCEEDED(rv = entries->GetNextFile(getter_AddRefs(file))) && file) { |
331 | 0 | // If |file| is a directory, recurse to find its entries as well. |
332 | 0 | bool isDirectory; |
333 | 0 | if (NS_FAILED(file->IsDirectory(&isDirectory))) { |
334 | 0 | continue; |
335 | 0 | } |
336 | 0 | if (isDirectory) { |
337 | 0 | DeleteTables(file, aTables); |
338 | 0 | continue; |
339 | 0 | } |
340 | 0 | |
341 | 0 | nsCString leafName; |
342 | 0 | rv = file->GetNativeLeafName(leafName); |
343 | 0 | NS_ENSURE_SUCCESS_VOID(rv); |
344 | 0 |
|
345 | 0 | // Remove file extension if there's one. |
346 | 0 | int32_t dotPosition = leafName.RFind("."); |
347 | 0 | if (dotPosition >= 0) { |
348 | 0 | leafName.Truncate(dotPosition); |
349 | 0 | } |
350 | 0 |
|
351 | 0 | if (!leafName.IsEmpty() && aTables.Contains(leafName)) { |
352 | 0 | if (NS_FAILED(file->Remove(false))) { |
353 | 0 | NS_WARNING(nsPrintfCString("Fail to remove file %s from the disk", |
354 | 0 | leafName.get()).get()); |
355 | 0 | } |
356 | 0 | } |
357 | 0 | } |
358 | 0 | NS_ENSURE_SUCCESS_VOID(rv); |
359 | 0 | } |
360 | | |
361 | | void |
362 | | Classifier::TableRequest(nsACString& aResult) |
363 | 0 | { |
364 | 0 | MOZ_ASSERT(!NS_IsMainThread(), |
365 | 0 | "TableRequest must be called on the classifier worker thread."); |
366 | 0 |
|
367 | 0 | // This function and all disk I/O are guaranteed to occur |
368 | 0 | // on the same thread, so no locking is needed. |
369 | 0 | if (!mIsTableRequestResultOutdated) { |
370 | 0 | aResult = mTableRequestResult; |
371 | 0 | return; |
372 | 0 | } |
373 | 0 | |
374 | 0 | // Generating v2 table info. |
375 | 0 | nsTArray<nsCString> tables; |
376 | 0 | ActiveTables(tables); |
377 | 0 | for (uint32_t i = 0; i < tables.Length(); i++) { |
378 | 0 | HashStore store(tables[i], GetProvider(tables[i]), mRootStoreDirectory); |
379 | 0 |
|
380 | 0 | nsresult rv = store.Open(); |
381 | 0 | if (NS_FAILED(rv)) { |
382 | 0 | continue; |
383 | 0 | } |
384 | 0 | |
385 | 0 | ChunkSet &adds = store.AddChunks(); |
386 | 0 | ChunkSet &subs = store.SubChunks(); |
387 | 0 |
|
388 | 0 | // Opening a HashStore always succeeds even if it is not a v2 table, |
389 | 0 | // so skip tables without add and sub chunks. |
390 | 0 | if (adds.Length() == 0 && subs.Length() == 0) { |
391 | 0 | continue; |
392 | 0 | } |
393 | 0 | |
394 | 0 | aResult.Append(store.TableName()); |
395 | 0 | aResult.Append(';'); |
396 | 0 |
|
397 | 0 | if (adds.Length() > 0) { |
398 | 0 | aResult.AppendLiteral("a:"); |
399 | 0 | nsAutoCString addList; |
400 | 0 | adds.Serialize(addList); |
401 | 0 | aResult.Append(addList); |
402 | 0 | } |
403 | 0 |
|
404 | 0 | if (subs.Length() > 0) { |
405 | 0 | if (adds.Length() > 0) |
406 | 0 | aResult.Append(':'); |
407 | 0 | aResult.AppendLiteral("s:"); |
408 | 0 | nsAutoCString subList; |
409 | 0 | subs.Serialize(subList); |
410 | 0 | aResult.Append(subList); |
411 | 0 | } |
412 | 0 |
|
413 | 0 | aResult.Append('\n'); |
414 | 0 | } |
415 | 0 |
|
416 | 0 | // Load meta data from *.metadata files in the root directory. |
417 | 0 | // Specifically for v4 tables. |
418 | 0 | nsCString metadata; |
419 | 0 | nsresult rv = LoadMetadata(mRootStoreDirectory, metadata); |
420 | 0 | if (NS_SUCCEEDED(rv)) { |
421 | 0 | aResult.Append(metadata); |
422 | 0 | } |
423 | 0 |
|
424 | 0 | // Update the TableRequest result in-memory cache. |
425 | 0 | mTableRequestResult = aResult; |
426 | 0 | mIsTableRequestResultOutdated = false; |
427 | 0 | } |
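
Sketch of the string this builds for the v2 part (table names and chunk numbers are hypothetical); each table contributes one line of the form "<table>;a:<add chunks>:s:<sub chunks>", and the v4 metadata loaded from the *.metadata files is appended after these lines:

    test-malware-simple;a:1-3,6:s:2
    test-phish-simple;a:1-5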
428 | | |
429 | | nsresult |
430 | | Classifier::Check(const nsACString& aSpec, |
431 | | const nsACString& aTables, |
432 | | LookupResultArray& aResults) |
433 | 0 | { |
434 | 0 | Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_CL_CHECK_TIME> timer; |
435 | 0 |
|
436 | 0 | // Get the set of fragments based on the url. This is necessary because we |
437 | 0 | // only look up at most 5 URLs per aSpec, even if aSpec has more than 5 |
438 | 0 | // components. |
439 | 0 | nsTArray<nsCString> fragments; |
440 | 0 | nsresult rv = LookupCache::GetLookupFragments(aSpec, &fragments); |
441 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
442 | 0 |
|
443 | 0 | nsTArray<nsCString> activeTables; |
444 | 0 | SplitTables(aTables, activeTables); |
445 | 0 |
|
446 | 0 | LookupCacheArray cacheArray; |
447 | 0 | for (uint32_t i = 0; i < activeTables.Length(); i++) { |
448 | 0 | LOG(("Checking table %s", activeTables[i].get())); |
449 | 0 | RefPtr<LookupCache> cache = GetLookupCache(activeTables[i]); |
450 | 0 | if (cache) { |
451 | 0 | cacheArray.AppendElement(cache); |
452 | 0 | } else { |
453 | 0 | return NS_ERROR_FAILURE; |
454 | 0 | } |
455 | 0 | } |
456 | 0 |
|
457 | 0 | // Now check each lookup fragment against the entries in the DB. |
458 | 0 | for (uint32_t i = 0; i < fragments.Length(); i++) { |
459 | 0 | Completion lookupHash; |
460 | 0 | lookupHash.FromPlaintext(fragments[i]); |
461 | 0 |
|
462 | 0 | if (LOG_ENABLED()) { |
463 | 0 | nsAutoCString checking; |
464 | 0 | lookupHash.ToHexString(checking); |
465 | 0 | LOG(("Checking fragment %s, hash %s (%X)", fragments[i].get(), |
466 | 0 | checking.get(), lookupHash.ToUint32())); |
467 | 0 | } |
468 | 0 |
|
469 | 0 | for (uint32_t i = 0; i < cacheArray.Length(); i++) { |
470 | 0 | RefPtr<LookupCache> cache = cacheArray[i]; |
471 | 0 | bool has, confirmed; |
472 | 0 | uint32_t matchLength; |
473 | 0 |
|
474 | 0 | rv = cache->Has(lookupHash, &has, &matchLength, &confirmed); |
475 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
476 | 0 |
|
477 | 0 | if (has) { |
478 | 0 | RefPtr<LookupResult> result = new LookupResult; |
479 | 0 | aResults.AppendElement(result); |
480 | 0 |
|
481 | 0 | LOG(("Found a result in %s: %s", |
482 | 0 | cache->TableName().get(), |
483 | 0 | confirmed ? "confirmed." : "Not confirmed.")); |
484 | 0 |
|
485 | 0 | result->hash.complete = lookupHash; |
486 | 0 | result->mConfirmed = confirmed; |
487 | 0 | result->mTableName.Assign(cache->TableName()); |
488 | 0 | result->mPartialHashLength = confirmed ? COMPLETE_SIZE : matchLength; |
489 | 0 | result->mProtocolV2 = LookupCache::Cast<LookupCacheV2>(cache); |
490 | 0 | } |
491 | 0 | } |
492 | 0 | } |
493 | 0 |
|
494 | 0 | return NS_OK; |
495 | 0 | } |
496 | | |
497 | | static nsresult |
498 | | SwapDirectoryContent(nsIFile* aDir1, |
499 | | nsIFile* aDir2, |
500 | | nsIFile* aParentDir, |
501 | | nsIFile* aTempDir) |
502 | 0 | { |
503 | 0 | // Pre-condition: |aDir1| and |aDir2| are directories and their parent |
504 | 0 | // is |aParentDir| for both. |
505 | 0 | // |
506 | 0 | // Post-condition: The locations where aDir1 and aDir2 point to will not |
507 | 0 | // change but their contents will be exchanged. If we fail |
508 | 0 | // to swap their contents, everything will be rolled back. |
509 | 0 |
|
510 | 0 | nsAutoCString tempDirName; |
511 | 0 | aTempDir->GetNativeLeafName(tempDirName); |
512 | 0 |
|
513 | 0 | nsresult rv; |
514 | 0 |
|
515 | 0 | nsAutoCString dirName1, dirName2; |
516 | 0 | aDir1->GetNativeLeafName(dirName1); |
517 | 0 | aDir2->GetNativeLeafName(dirName2); |
518 | 0 |
|
519 | 0 | LOG(("Swapping directories %s and %s...", dirName1.get(), |
520 | 0 | dirName2.get())); |
521 | 0 |
|
522 | 0 | // 1. Rename "dirName1" to "temp" |
523 | 0 | rv = aDir1->RenameToNative(nullptr, tempDirName); |
524 | 0 | if (NS_FAILED(rv)) { |
525 | 0 | LOG(("Unable to rename %s to %s", dirName1.get(), |
526 | 0 | tempDirName.get())); |
527 | 0 | return rv; // Nothing to roll back. |
528 | 0 | } |
529 | 0 |
|
530 | 0 | // 1.1. Create a handle for temp directory. This is required since |
531 | 0 | // |nsIFile.rename| will not change the location where the |
532 | 0 | // object points to. |
533 | 0 | nsCOMPtr<nsIFile> tempDirectory; |
534 | 0 | rv = aParentDir->Clone(getter_AddRefs(tempDirectory)); |
535 | 0 | rv = tempDirectory->AppendNative(tempDirName); |
536 | 0 |
|
537 | 0 | // 2. Rename "dirName2" to "dirName1". |
538 | 0 | rv = aDir2->RenameToNative(nullptr, dirName1); |
539 | 0 | if (NS_FAILED(rv)) { |
540 | 0 | LOG(("Failed to rename %s to %s. Rename temp directory back to %s", |
541 | 0 | dirName2.get(), dirName1.get(), dirName1.get())); |
542 | 0 | nsresult rbrv = tempDirectory->RenameToNative(nullptr, dirName1); |
543 | 0 | NS_ENSURE_SUCCESS(rbrv, rbrv); |
544 | 0 | return rv; |
545 | 0 | } |
546 | 0 | |
547 | 0 | // 3. Rename "temp" to "dirName2". |
548 | 0 | rv = tempDirectory->RenameToNative(nullptr, dirName2); |
549 | 0 | if (NS_FAILED(rv)) { |
550 | 0 | LOG(("Failed to rename temp directory to %s. ", dirName2.get())); |
551 | 0 | // We've done (1) renaming "dir1 to temp" and |
552 | 0 | // (2) renaming "dir2 to dir1" |
553 | 0 | // so the rollback is |
554 | 0 | // (1) renaming "dir1 to dir2" and |
555 | 0 | // (2) renaming "temp to dir1" |
556 | 0 | nsresult rbrv; // rollback result |
557 | 0 | rbrv = aDir1->RenameToNative(nullptr, dirName2); |
558 | 0 | NS_ENSURE_SUCCESS(rbrv, rbrv); |
559 | 0 | rbrv = tempDirectory->RenameToNative(nullptr, dirName1); |
560 | 0 | NS_ENSURE_SUCCESS(rbrv, rbrv); |
561 | 0 | return rv; |
562 | 0 | } |
563 | 0 | |
564 | 0 | return rv; |
565 | 0 | } |
566 | | |
567 | | void |
568 | | Classifier::RemoveUpdateIntermediaries() |
569 | 0 | { |
570 | 0 | // Remove old LookupCaches. |
571 | 0 | mNewLookupCaches.Clear(); |
572 | 0 |
|
573 | 0 | // Remove the "old" directory. (despite its looking-new name) |
574 | 0 | if (NS_FAILED(mUpdatingDirectory->Remove(true))) { |
575 | 0 | // If the directory is locked against removal for some reason, |
576 | 0 | // we will fail here and it doesn't matter until the next |
577 | 0 | // update. (The next update will fail due to the leftover |
578 | 0 | // "safebrowsing-updating" directory.) |
579 | 0 | LOG(("Failed to remove updating directory.")); |
580 | 0 | } |
581 | 0 | } |
582 | | |
583 | | void |
584 | | Classifier::CopyAndInvalidateFullHashCache() |
585 | 0 | { |
586 | 0 | MOZ_ASSERT(NS_GetCurrentThread() != mUpdateThread, |
587 | 0 | "CopyAndInvalidateFullHashCache cannot be called on update thread " |
588 | 0 | "since it mutates mLookupCaches which is only safe on " |
589 | 0 | "worker thread."); |
590 | 0 |
|
591 | 0 | // New lookup caches are built from disk, so data like the fullhash |
592 | 0 | // cache, which is generated online, won't exist. We have to manually |
593 | 0 | // copy the cache from the old LookupCache to the new LookupCache. |
594 | 0 | for (auto& newCache: mNewLookupCaches) { |
595 | 0 | for (auto& oldCache: mLookupCaches) { |
596 | 0 | if (oldCache->TableName() == newCache->TableName()) { |
597 | 0 | newCache->CopyFullHashCache(oldCache); |
598 | 0 | break; |
599 | 0 | } |
600 | 0 | } |
601 | 0 | } |
602 | 0 |
|
603 | 0 | // Clear the cache on update. |
604 | 0 | // Invalidate cache entries in CopyAndInvalidateFullHashCache because only |
605 | 0 | // at this point do we have cache data in the LookupCache. |
606 | 0 | for (auto& newCache: mNewLookupCaches) { |
607 | 0 | newCache->InvalidateExpiredCacheEntries(); |
608 | 0 | } |
609 | 0 | } |
610 | | |
611 | | void |
612 | | Classifier::MergeNewLookupCaches() |
613 | 0 | { |
614 | 0 | MOZ_ASSERT(NS_GetCurrentThread() != mUpdateThread, |
615 | 0 | "MergeNewLookupCaches cannot be called on update thread " |
616 | 0 | "since it mutates mLookupCaches which is only safe on " |
617 | 0 | "worker thread."); |
618 | 0 |
|
619 | 0 | for (auto& newCache: mNewLookupCaches) { |
620 | 0 | // Each element in mNewLookupCaches will be swapped with |
621 | 0 | // - an old cache in mLookupCaches with the same table name, or |
622 | 0 | // - nullptr (mLookupCaches will be expanded) otherwise. |
623 | 0 | size_t swapIndex = 0; |
624 | 0 | for (; swapIndex < mLookupCaches.Length(); swapIndex++) { |
625 | 0 | if (mLookupCaches[swapIndex]->TableName() == newCache->TableName()) { |
626 | 0 | break; |
627 | 0 | } |
628 | 0 | } |
629 | 0 | if (swapIndex == mLookupCaches.Length()) { |
630 | 0 | mLookupCaches.AppendElement(nullptr); |
631 | 0 | } |
632 | 0 |
|
633 | 0 | Swap(mLookupCaches[swapIndex], newCache); |
634 | 0 | mLookupCaches[swapIndex]->UpdateRootDirHandle(mRootStoreDirectory); |
635 | 0 | } |
636 | 0 |
|
637 | 0 | // At this point, mNewLookupCaches' length remains the same, but each |
638 | 0 | // element is either an old cache (overridden) or nullptr (appended). |
639 | 0 | } |
640 | | |
641 | | nsresult |
642 | | Classifier::SwapInNewTablesAndCleanup() |
643 | 0 | { |
644 | 0 | nsresult rv; |
645 | 0 |
|
646 | 0 | // Step 1. Swap in on-disk tables. The idea of using "safebrowsing-backup" |
647 | 0 | // as the intermediary directory is that the databases can be recovered if |
648 | 0 | // a crash occurs in any step of the swap. (We will recover from |
649 | 0 | // "safebrowsing-backup" in OpenDb().) |
650 | 0 | rv = SwapDirectoryContent(mUpdatingDirectory, // contains new tables |
651 | 0 | mRootStoreDirectory, // contains old tables |
652 | 0 | mCacheDirectory, // common parent dir |
653 | 0 | mBackupDirectory); // intermediary dir for swap |
654 | 0 | if (NS_FAILED(rv)) { |
655 | 0 | LOG(("Failed to swap in on-disk tables.")); |
656 | 0 | RemoveUpdateIntermediaries(); |
657 | 0 | return rv; |
658 | 0 | } |
659 | 0 |
|
660 | 0 | // Step 2. Merge mNewLookupCaches into mLookupCaches. The outdated |
661 | 0 | // LookupCaches will be stored in mNewLookupCaches and be cleaned |
662 | 0 | // up later. |
663 | 0 | MergeNewLookupCaches(); |
664 | 0 |
|
665 | 0 | // Step 3. Re-generate active tables based on on-disk tables. |
666 | 0 | rv = RegenActiveTables(); |
667 | 0 | if (NS_FAILED(rv)) { |
668 | 0 | LOG(("Failed to re-generate active tables!")); |
669 | 0 | } |
670 | 0 |
|
671 | 0 | // Step 4. Clean up intermediaries for update. |
672 | 0 | RemoveUpdateIntermediaries(); |
673 | 0 |
|
674 | 0 | // Step 5. Invalidate the cached table request result. |
675 | 0 | mIsTableRequestResultOutdated = true; |
676 | 0 |
|
677 | 0 | LOG(("Done swap in updated tables.")); |
678 | 0 |
|
679 | 0 | return rv; |
680 | 0 | } |
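
Concretely, with the directory names from SetupPathNames(), Step 1 amounts to three renames inside SwapDirectoryContent(), each rolled back if a later one fails:

    safebrowsing-updating -> safebrowsing-backup     (park the new tables in the intermediary)
    safebrowsing          -> safebrowsing-updating   (move the old tables out of the way)
    safebrowsing-backup   -> safebrowsing            (the new tables become the live store)

The old tables, now sitting in "safebrowsing-updating", are deleted by RemoveUpdateIntermediaries() in Step 4.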
681 | | |
682 | | void Classifier::FlushAndDisableAsyncUpdate() |
683 | 0 | { |
684 | 0 | LOG(("Classifier::FlushAndDisableAsyncUpdate [%p, %p]", this, mUpdateThread.get())); |
685 | 0 |
|
686 | 0 | if (!mUpdateThread) { |
687 | 0 | LOG(("Async update has been disabled.")); |
688 | 0 | return; |
689 | 0 | } |
690 | 0 |
|
691 | 0 | mUpdateThread->Shutdown(); |
692 | 0 | mUpdateThread = nullptr; |
693 | 0 | } |
694 | | |
695 | | nsresult |
696 | | Classifier::AsyncApplyUpdates(const TableUpdateArray& aUpdates, |
697 | | const AsyncUpdateCallback& aCallback) |
698 | 0 | { |
699 | 0 | LOG(("Classifier::AsyncApplyUpdates")); |
700 | 0 |
|
701 | 0 | if (!mUpdateThread) { |
702 | 0 | LOG(("Async update has already been disabled.")); |
703 | 0 | return NS_ERROR_FAILURE; |
704 | 0 | } |
705 | 0 |
|
706 | 0 | // Caller thread | Update thread |
707 | 0 | // -------------------------------------------------------- |
708 | 0 | // | ApplyUpdatesBackground |
709 | 0 | // (processing other task) | (bg-update done. ping back to caller thread) |
710 | 0 | // (processing other task) | idle... |
711 | 0 | // ApplyUpdatesForeground | |
712 | 0 | // callback | |
713 | 0 |
|
714 | 0 | MOZ_ASSERT(mNewLookupCaches.IsEmpty(), |
715 | 0 | "There should be no leftovers from a previous update."); |
716 | 0 |
|
717 | 0 | mUpdateInterrupted = false; |
718 | 0 | nsresult rv = mRootStoreDirectory->Clone(getter_AddRefs(mRootStoreDirectoryForUpdate)); |
719 | 0 | if (NS_FAILED(rv)) { |
720 | 0 | LOG(("Failed to clone mRootStoreDirectory for update.")); |
721 | 0 | return rv; |
722 | 0 | } |
723 | 0 |
|
724 | 0 | nsCOMPtr<nsIThread> callerThread = NS_GetCurrentThread(); |
725 | 0 | MOZ_ASSERT(callerThread != mUpdateThread); |
726 | 0 |
|
727 | 0 | RefPtr<Classifier> self = this; |
728 | 0 | nsCOMPtr<nsIRunnable> bgRunnable = |
729 | 0 | NS_NewRunnableFunction("safebrowsing::Classifier::AsyncApplyUpdates", |
730 | 0 | [self, aUpdates, aCallback, callerThread] { |
731 | 0 | MOZ_ASSERT(NS_GetCurrentThread() == self->mUpdateThread, |
732 | 0 | "MUST be on update thread"); |
733 | 0 |
|
734 | 0 | nsresult bgRv; |
735 | 0 | nsCString failedTableName; |
736 | 0 |
|
737 | 0 | TableUpdateArray updates; |
738 | 0 |
|
739 | 0 | // Make a copy of the array since we'll be removing entries as |
740 | 0 | // we process them on the background thread. |
741 | 0 | if (updates.AppendElements(aUpdates, fallible)) { |
742 | 0 | LOG(("Step 1. ApplyUpdatesBackground on update thread.")); |
743 | 0 | bgRv = self->ApplyUpdatesBackground(updates, failedTableName); |
744 | 0 | } else { |
745 | 0 | LOG(("Step 1. Not enough memory to run ApplyUpdatesBackground on update thread.")); |
746 | 0 | bgRv = NS_ERROR_OUT_OF_MEMORY; |
747 | 0 | } |
748 | 0 |
|
749 | 0 | nsCOMPtr<nsIRunnable> fgRunnable = NS_NewRunnableFunction( |
750 | 0 | "safebrowsing::Classifier::AsyncApplyUpdates", |
751 | 0 | [self, aCallback, bgRv, failedTableName, callerThread] { |
752 | 0 | MOZ_ASSERT(NS_GetCurrentThread() == callerThread, |
753 | 0 | "MUST be on caller thread"); |
754 | 0 |
|
755 | 0 | LOG(("Step 2. ApplyUpdatesForeground on caller thread")); |
756 | 0 | nsresult rv = self->ApplyUpdatesForeground(bgRv, failedTableName); |
757 | 0 |
|
758 | 0 | LOG(("Step 3. Updates applied! Fire callback.")); |
759 | 0 | aCallback(rv); |
760 | 0 | }); |
761 | 0 | callerThread->Dispatch(fgRunnable, NS_DISPATCH_NORMAL); |
762 | 0 | }); |
763 | 0 |
|
764 | 0 | return mUpdateThread->Dispatch(bgRunnable, NS_DISPATCH_NORMAL); |
765 | 0 | } |
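
A caller-side sketch of the two-step dance above (illustrative only; it assumes AsyncUpdateCallback accepts a lambda taking the nsresult passed to aCallback, and the variable names are made up):

    // Must run on a thread other than the update thread, e.g. the worker thread.
    nsresult rv = classifier->AsyncApplyUpdates(updates, [](nsresult aStatus) {
      // Invoked back on the calling thread after ApplyUpdatesForeground() (Step 3).
      if (NS_FAILED(aStatus)) {
        // e.g. record the failing table and schedule a retry
      }
    });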
766 | | |
767 | | nsresult |
768 | | Classifier::ApplyUpdatesBackground(TableUpdateArray& aUpdates, |
769 | | nsACString& aFailedTableName) |
770 | 0 | { |
771 | 0 | // |mUpdateInterrupted| is guaranteed to have been unset. |
772 | 0 | // If |mUpdateInterrupted| is set at any point, Reset() must have |
773 | 0 | // been called, and we need to interrupt the update process. |
774 | 0 | // We only add checkpoints for non-trivial tasks. |
775 | 0 |
|
776 | 0 | if (aUpdates.IsEmpty()) { |
777 | 0 | return NS_OK; |
778 | 0 | } |
779 | 0 | |
780 | 0 | nsCOMPtr<nsIUrlClassifierUtils> urlUtil = |
781 | 0 | do_GetService(NS_URLCLASSIFIERUTILS_CONTRACTID); |
782 | 0 |
|
783 | 0 | nsCString provider; |
784 | 0 | // Assume all TableUpdate objects have the same provider. |
785 | 0 | urlUtil->GetTelemetryProvider(aUpdates[0]->TableName(), provider); |
786 | 0 |
|
787 | 0 | Telemetry::AutoTimer<Telemetry::URLCLASSIFIER_CL_KEYED_UPDATE_TIME> |
788 | 0 | keyedTimer(provider); |
789 | 0 |
|
790 | 0 | PRIntervalTime clockStart = 0; |
791 | 0 | if (LOG_ENABLED()) { |
792 | 0 | clockStart = PR_IntervalNow(); |
793 | 0 | } |
794 | 0 |
|
795 | 0 | nsresult rv; |
796 | 0 |
|
797 | 0 | // Check point 1: Copying files takes time so we check |mUpdateInterrupted| |
798 | 0 | // inside CopyInUseDirForUpdate(). |
799 | 0 | rv = CopyInUseDirForUpdate(); // i.e. mUpdatingDirectory will be setup. |
800 | 0 | if (NS_FAILED(rv)) { |
801 | 0 | LOG(("Failed to copy in-use directory for update.")); |
802 | 0 | return (rv == NS_ERROR_ABORT) ? NS_OK : rv; |
803 | 0 | } |
804 | 0 |
|
805 | 0 | LOG(("Applying %zu table updates.", aUpdates.Length())); |
806 | 0 |
|
807 | 0 | for (uint32_t i = 0; i < aUpdates.Length(); i++) { |
808 | 0 | RefPtr<const TableUpdate> update = aUpdates[i]; |
809 | 0 | if (!update) { |
810 | 0 | // A previous UpdateHashStore() may have consumed this update. |
811 | 0 | continue; |
812 | 0 | } |
813 | 0 | |
814 | 0 | // Run all updates for one table |
815 | 0 | nsAutoCString updateTable(update->TableName()); |
816 | 0 |
|
817 | 0 | // Check point 2: Processing downloaded data takes time. |
818 | 0 | if (mUpdateInterrupted) { |
819 | 0 | LOG(("Update is interrupted. Stop building new tables.")); |
820 | 0 | return NS_OK; |
821 | 0 | } |
822 | 0 |
|
823 | 0 | // Will update the mirrored in-memory and on-disk databases. |
824 | 0 | if (TableUpdate::Cast<TableUpdateV2>(update)) { |
825 | 0 | rv = UpdateHashStore(aUpdates, updateTable); |
826 | 0 | } else { |
827 | 0 | rv = UpdateTableV4(aUpdates, updateTable); |
828 | 0 | } |
829 | 0 |
|
830 | 0 | if (NS_FAILED(rv)) { |
831 | 0 | aFailedTableName = updateTable; |
832 | 0 | RemoveUpdateIntermediaries(); |
833 | 0 | return rv; |
834 | 0 | } |
835 | 0 | } |
836 | 0 |
|
837 | 0 | if (LOG_ENABLED()) { |
838 | 0 | PRIntervalTime clockEnd = PR_IntervalNow(); |
839 | 0 | LOG(("update took %dms\n", |
840 | 0 | PR_IntervalToMilliseconds(clockEnd - clockStart))); |
841 | 0 | } |
842 | 0 |
|
843 | 0 | return rv; |
844 | 0 | } |
845 | | |
846 | | nsresult |
847 | | Classifier::ApplyUpdatesForeground(nsresult aBackgroundRv, |
848 | | const nsACString& aFailedTableName) |
849 | 0 | { |
850 | 0 | if (mUpdateInterrupted) { |
851 | 0 | LOG(("Update is interrupted! Just remove update intermediaries.")); |
852 | 0 | RemoveUpdateIntermediaries(); |
853 | 0 | return NS_OK; |
854 | 0 | } |
855 | 0 | if (NS_SUCCEEDED(aBackgroundRv)) { |
856 | 0 | // Copy and invalidate the fullhash cache here because this call requires |
857 | 0 | // mLookupCaches, which is only available on the worker thread. |
858 | 0 | CopyAndInvalidateFullHashCache(); |
859 | 0 |
|
860 | 0 | return SwapInNewTablesAndCleanup(); |
861 | 0 | } |
862 | 0 | if (NS_ERROR_OUT_OF_MEMORY != aBackgroundRv) { |
863 | 0 | ResetTables(Clear_All, nsTArray<nsCString> { nsCString(aFailedTableName) }); |
864 | 0 | } |
865 | 0 | return aBackgroundRv; |
866 | 0 | } |
867 | | |
868 | | nsresult |
869 | | Classifier::ApplyFullHashes(ConstTableUpdateArray& aUpdates) |
870 | 0 | { |
871 | 0 | MOZ_ASSERT(NS_GetCurrentThread() != mUpdateThread, |
872 | 0 | "ApplyFullHashes() MUST NOT be called on update thread"); |
873 | 0 | MOZ_ASSERT(!NS_IsMainThread(), |
874 | 0 | "ApplyFullHashes() must be called on the classifier worker thread."); |
875 | 0 |
|
876 | 0 | LOG(("Applying %zu table gethashes.", aUpdates.Length())); |
877 | 0 |
|
878 | 0 | for (uint32_t i = 0; i < aUpdates.Length(); i++) { |
879 | 0 | nsresult rv = UpdateCache(aUpdates[i]); |
880 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
881 | 0 |
|
882 | 0 | aUpdates[i] = nullptr; |
883 | 0 | } |
884 | 0 |
|
885 | 0 | return NS_OK; |
886 | 0 | } |
887 | | |
888 | | void |
889 | | Classifier::GetCacheInfo(const nsACString& aTable, |
890 | | nsIUrlClassifierCacheInfo** aCache) |
891 | 0 | { |
892 | 0 | RefPtr<const LookupCache> lookupCache = GetLookupCache(aTable); |
893 | 0 | if (!lookupCache) { |
894 | 0 | return; |
895 | 0 | } |
896 | 0 | |
897 | 0 | lookupCache->GetCacheInfo(aCache); |
898 | 0 | } |
899 | | |
900 | | void |
901 | | Classifier::DropStores() |
902 | 0 | { |
903 | 0 | // See the comment in Classifier::Close() before adding anything here. |
904 | 0 | mLookupCaches.Clear(); |
905 | 0 | } |
906 | | |
907 | | nsresult |
908 | | Classifier::RegenActiveTables() |
909 | 0 | { |
910 | 0 | if (mIsClosed) { |
911 | 0 | return NS_OK; // nothing to do, the classifier is done |
912 | 0 | } |
913 | 0 | |
914 | 0 | mActiveTablesCache.Clear(); |
915 | 0 |
|
916 | 0 | nsTArray<nsCString> foundTables; |
917 | 0 | ScanStoreDir(mRootStoreDirectory, foundTables); |
918 | 0 |
|
919 | 0 | for (uint32_t i = 0; i < foundTables.Length(); i++) { |
920 | 0 | nsCString table(foundTables[i]); |
921 | 0 |
|
922 | 0 | RefPtr<const LookupCache> lookupCache = GetLookupCache(table); |
923 | 0 | if (!lookupCache) { |
924 | 0 | LOG(("Inactive table (no cache): %s", table.get())); |
925 | 0 | continue; |
926 | 0 | } |
927 | 0 |
|
928 | 0 | if (!lookupCache->IsPrimed()) { |
929 | 0 | LOG(("Inactive table (cache not primed): %s", table.get())); |
930 | 0 | continue; |
931 | 0 | } |
932 | 0 |
|
933 | 0 | if (LookupCache::Cast<const LookupCacheV4>(lookupCache)) { |
934 | 0 | LOG(("Active v4 table: %s", table.get())); |
935 | 0 | } else { |
936 | 0 | HashStore store(table, GetProvider(table), mRootStoreDirectory); |
937 | 0 |
|
938 | 0 | nsresult rv = store.Open(); |
939 | 0 | if (NS_FAILED(rv)) { |
940 | 0 | continue; |
941 | 0 | } |
942 | 0 | |
943 | 0 | const ChunkSet &adds = store.AddChunks(); |
944 | 0 | const ChunkSet &subs = store.SubChunks(); |
945 | 0 |
|
946 | 0 | if (adds.Length() == 0 && subs.Length() == 0) { |
947 | 0 | continue; |
948 | 0 | } |
949 | 0 | |
950 | 0 | LOG(("Active v2 table: %s", store.TableName().get())); |
951 | 0 | } |
952 | 0 |
|
953 | 0 | mActiveTablesCache.AppendElement(table); |
954 | 0 | } |
955 | 0 |
|
956 | 0 | return NS_OK; |
957 | 0 | } |
958 | | |
959 | | nsresult |
960 | | Classifier::ScanStoreDir(nsIFile* aDirectory, nsTArray<nsCString>& aTables) |
961 | 0 | { |
962 | 0 | nsCOMPtr<nsIDirectoryEnumerator> entries; |
963 | 0 | nsresult rv = aDirectory->GetDirectoryEntries(getter_AddRefs(entries)); |
964 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
965 | 0 |
|
966 | 0 | nsCOMPtr<nsIFile> file; |
967 | 0 | while (NS_SUCCEEDED(rv = entries->GetNextFile(getter_AddRefs(file))) && file) { |
968 | 0 | // If |file| is a directory, recurse to find its entries as well. |
969 | 0 | bool isDirectory; |
970 | 0 | if (NS_FAILED(file->IsDirectory(&isDirectory))) { |
971 | 0 | continue; |
972 | 0 | } |
973 | 0 | if (isDirectory) { |
974 | 0 | ScanStoreDir(file, aTables); |
975 | 0 | continue; |
976 | 0 | } |
977 | 0 | |
978 | 0 | nsCString leafName; |
979 | 0 | rv = file->GetNativeLeafName(leafName); |
980 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
981 | 0 |
|
982 | 0 | // Both v2 and v4 tables have a .pset file. |
983 | 0 | nsCString suffix(NS_LITERAL_CSTRING(".pset")); |
984 | 0 |
|
985 | 0 | int32_t dot = leafName.RFind(suffix); |
986 | 0 | if (dot != -1) { |
987 | 0 | leafName.Cut(dot, suffix.Length()); |
988 | 0 | aTables.AppendElement(leafName); |
989 | 0 | } |
990 | 0 | } |
991 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
992 | 0 |
|
993 | 0 | return NS_OK; |
994 | 0 | } |
995 | | |
996 | | nsresult |
997 | | Classifier::ActiveTables(nsTArray<nsCString>& aTables) const |
998 | 0 | { |
999 | 0 | aTables = mActiveTablesCache; |
1000 | 0 | return NS_OK; |
1001 | 0 | } |
1002 | | |
1003 | | nsresult |
1004 | | Classifier::CleanToDelete() |
1005 | 0 | { |
1006 | 0 | bool exists; |
1007 | 0 | nsresult rv = mToDeleteDirectory->Exists(&exists); |
1008 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1009 | 0 |
|
1010 | 0 | if (exists) { |
1011 | 0 | rv = mToDeleteDirectory->Remove(true); |
1012 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1013 | 0 | } |
1014 | 0 |
|
1015 | 0 | return NS_OK; |
1016 | 0 | } |
1017 | | |
1018 | | #ifdef MOZ_SAFEBROWSING_DUMP_FAILED_UPDATES |
1019 | | |
1020 | | already_AddRefed<nsIFile> |
1021 | | Classifier::GetFailedUpdateDirectroy() |
1022 | 0 | { |
1023 | 0 | nsCString failedUpdatekDirName = STORE_DIRECTORY + nsCString("-failedupdate"); |
1024 | 0 |
|
1025 | 0 | nsCOMPtr<nsIFile> failedUpdatekDirectory; |
1026 | 0 | if (NS_FAILED(mCacheDirectory->Clone(getter_AddRefs(failedUpdatekDirectory))) || |
1027 | 0 | NS_FAILED(failedUpdatekDirectory->AppendNative(failedUpdatekDirName))) { |
1028 | 0 | LOG(("Failed to init failedUpdatekDirectory.")); |
1029 | 0 | return nullptr; |
1030 | 0 | } |
1031 | 0 |
|
1032 | 0 | return failedUpdatekDirectory.forget(); |
1033 | 0 | } |
1034 | | |
1035 | | nsresult |
1036 | | Classifier::DumpRawTableUpdates(const nsACString& aRawUpdates) |
1037 | 0 | { |
1038 | 0 | LOG(("Dumping raw table updates...")); |
1039 | 0 |
|
1040 | 0 | DumpFailedUpdate(); |
1041 | 0 |
|
1042 | 0 | nsCOMPtr<nsIFile> failedUpdatekDirectory = GetFailedUpdateDirectroy(); |
1043 | 0 |
|
1044 | 0 | // Create tableupdate.bin and dump raw table update data. |
1045 | 0 | nsCOMPtr<nsIFile> rawTableUpdatesFile; |
1046 | 0 | nsCOMPtr<nsIOutputStream> outputStream; |
1047 | 0 | if (NS_FAILED(failedUpdatekDirectory->Clone(getter_AddRefs(rawTableUpdatesFile))) || |
1048 | 0 | NS_FAILED(rawTableUpdatesFile->AppendNative(nsCString("tableupdates.bin"))) || |
1049 | 0 | NS_FAILED(NS_NewLocalFileOutputStream(getter_AddRefs(outputStream), |
1050 | 0 | rawTableUpdatesFile, |
1051 | 0 | PR_WRONLY | PR_TRUNCATE | PR_CREATE_FILE))) { |
1052 | 0 | LOG(("Failed to create file to dump raw table updates.")); |
1053 | 0 | return NS_ERROR_FAILURE; |
1054 | 0 | } |
1055 | 0 |
|
1056 | 0 | // Write out the data. |
1057 | 0 | uint32_t written; |
1058 | 0 | nsresult rv = outputStream->Write(aRawUpdates.BeginReading(), |
1059 | 0 | aRawUpdates.Length(), &written); |
1060 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1061 | 0 | NS_ENSURE_TRUE(written == aRawUpdates.Length(), NS_ERROR_FAILURE); |
1062 | 0 |
|
1063 | 0 | return rv; |
1064 | 0 | } |
1065 | | |
1066 | | nsresult |
1067 | | Classifier::DumpFailedUpdate() |
1068 | 0 | { |
1069 | 0 | LOG(("Dumping failed update...")); |
1070 | 0 |
|
1071 | 0 | nsCOMPtr<nsIFile> failedUpdatekDirectory = GetFailedUpdateDirectroy(); |
1072 | 0 |
|
1073 | 0 | // Remove the "failed update" directory no matter it exists or not. |
1074 | 0 | // Failure is fine because the directory may not exist. |
1075 | 0 | failedUpdatekDirectory->Remove(true); |
1076 | 0 |
|
1077 | 0 | nsCString failedUpdatekDirName; |
1078 | 0 | nsresult rv = failedUpdatekDirectory->GetNativeLeafName(failedUpdatekDirName); |
1079 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1080 | 0 |
|
1081 | 0 | // Copy the in-use directory to a clean "failed update" directory. |
1082 | 0 | nsCOMPtr<nsIFile> inUseDirectory; |
1083 | 0 | if (NS_FAILED(mRootStoreDirectory->Clone(getter_AddRefs(inUseDirectory))) || |
1084 | 0 | NS_FAILED(inUseDirectory->CopyToNative(nullptr, failedUpdatekDirName))) { |
1085 | 0 | LOG(("Failed to move in-use to the \"failed update\" directory %s", |
1086 | 0 | failedUpdatekDirName.get())); |
1087 | 0 | return NS_ERROR_FAILURE; |
1088 | 0 | } |
1089 | 0 |
|
1090 | 0 | return rv; |
1091 | 0 | } |
1092 | | |
1093 | | #endif // MOZ_SAFEBROWSING_DUMP_FAILED_UPDATES |
1094 | | |
1095 | | /** |
1096 | | * This function copies the files one by one to the destination folder. |
1097 | | * Before copying a file, it checks |mUpdateInterrupted| and returns |
1098 | | * NS_ERROR_ABORT if the flag is set. |
1099 | | */ |
1100 | | nsresult |
1101 | | Classifier::CopyDirectoryInterruptible(nsCOMPtr<nsIFile>& aDestDir, nsCOMPtr<nsIFile>& aSourceDir) |
1102 | 0 | { |
1103 | 0 | nsCOMPtr<nsIDirectoryEnumerator> entries; |
1104 | 0 | nsresult rv = aSourceDir->GetDirectoryEntries(getter_AddRefs(entries)); |
1105 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1106 | 0 | MOZ_ASSERT(entries); |
1107 | 0 |
|
1108 | 0 | nsCOMPtr<nsIFile> source; |
1109 | 0 | while (NS_SUCCEEDED(rv = entries->GetNextFile(getter_AddRefs(source))) && |
1110 | 0 | source) { |
1111 | 0 | if (mUpdateInterrupted) { |
1112 | 0 | LOG(("Update is interrupted. Aborting the directory copy")); |
1113 | 0 | return NS_ERROR_ABORT; |
1114 | 0 | } |
1115 | 0 |
|
1116 | 0 | bool isDirectory; |
1117 | 0 | rv = source->IsDirectory(&isDirectory); |
1118 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1119 | 0 |
|
1120 | 0 | if (isDirectory) { |
1121 | 0 | // If it is a directory, recursively copy the files inside the directory. |
1122 | 0 | nsAutoCString leaf; |
1123 | 0 | source->GetNativeLeafName(leaf); |
1124 | 0 | MOZ_ASSERT(!leaf.IsEmpty()); |
1125 | 0 |
|
1126 | 0 | nsCOMPtr<nsIFile> dest; |
1127 | 0 | aDestDir->Clone(getter_AddRefs(dest)); |
1128 | 0 | dest->AppendNative(leaf); |
1129 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1130 | 0 |
|
1131 | 0 | rv = CopyDirectoryInterruptible(dest, source); |
1132 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1133 | 0 | } else { |
1134 | 0 | rv = source->CopyToNative(aDestDir, EmptyCString()); |
1135 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1136 | 0 | } |
1137 | 0 | } |
1138 | 0 |
|
1139 | 0 | // If the destination directory doesn't exist in the end, it means the |
1140 | 0 | // source directory was empty, so create the empty destination here. |
1141 | 0 | bool exist; |
1142 | 0 | rv = aDestDir->Exists(&exist); |
1143 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1144 | 0 |
|
1145 | 0 | if (!exist) { |
1146 | 0 | rv = aDestDir->Create(nsIFile::DIRECTORY_TYPE, 0755); |
1147 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1148 | 0 | } |
1149 | 0 |
|
1150 | 0 | return NS_OK; |
1151 | 0 | } |
1152 | | |
1153 | | nsresult |
1154 | | Classifier::CopyInUseDirForUpdate() |
1155 | 0 | { |
1156 | 0 | LOG(("Copy in-use directory content for update.")); |
1157 | 0 |
|
1158 | 0 | // We copy everything from the in-use directory to a temporary directory |
1159 | 0 | // for updating. |
1160 | 0 |
|
1161 | 0 | // Remove the destination directory first (just in case), then do the copy. |
1162 | 0 | mUpdatingDirectory->Remove(true); |
1163 | 0 | if (!mRootStoreDirectoryForUpdate) { |
1164 | 0 | LOG(("mRootStoreDirectoryForUpdate is null.")); |
1165 | 0 | return NS_ERROR_NULL_POINTER; |
1166 | 0 | } |
1167 | 0 |
|
1168 | 0 | nsresult rv = CopyDirectoryInterruptible(mUpdatingDirectory, |
1169 | 0 | mRootStoreDirectoryForUpdate); |
1170 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1171 | 0 |
|
1172 | 0 | return NS_OK; |
1173 | 0 | } |
1174 | | |
1175 | | nsresult |
1176 | | Classifier::RecoverBackups() |
1177 | 0 | { |
1178 | 0 | bool backupExists; |
1179 | 0 | nsresult rv = mBackupDirectory->Exists(&backupExists); |
1180 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1181 | 0 |
|
1182 | 0 | if (backupExists) { |
1183 | 0 | // Remove the safebrowsing dir if it exists |
1184 | 0 | nsCString storeDirName; |
1185 | 0 | rv = mRootStoreDirectory->GetNativeLeafName(storeDirName); |
1186 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1187 | 0 |
|
1188 | 0 | bool storeExists; |
1189 | 0 | rv = mRootStoreDirectory->Exists(&storeExists); |
1190 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1191 | 0 |
|
1192 | 0 | if (storeExists) { |
1193 | 0 | rv = mRootStoreDirectory->Remove(true); |
1194 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1195 | 0 | } |
1196 | 0 |
|
1197 | 0 | // Move the backup to the store location |
1198 | 0 | rv = mBackupDirectory->MoveToNative(nullptr, storeDirName); |
1199 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1200 | 0 |
|
1201 | 0 | // mBackupDirectory now points to storeDir, fix up. |
1202 | 0 | rv = SetupPathNames(); |
1203 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1204 | 0 | } |
1205 | 0 |
|
1206 | 0 | return NS_OK; |
1207 | 0 | } |
1208 | | |
1209 | | bool |
1210 | | Classifier::CheckValidUpdate(TableUpdateArray& aUpdates, |
1211 | | const nsACString& aTable) |
1212 | 0 | { |
1213 | 0 | // take the quick exit if there is no valid update for us |
1214 | 0 | // (common case) |
1215 | 0 | uint32_t validupdates = 0; |
1216 | 0 |
|
1217 | 0 | for (uint32_t i = 0; i < aUpdates.Length(); i++) { |
1218 | 0 | RefPtr<const TableUpdate> update = aUpdates[i]; |
1219 | 0 | if (!update || !update->TableName().Equals(aTable)) { |
1220 | 0 | continue; |
1221 | 0 | } |
1222 | 0 | if (update->Empty()) { |
1223 | 0 | aUpdates[i] = nullptr; |
1224 | 0 | continue; |
1225 | 0 | } |
1226 | 0 | validupdates++; |
1227 | 0 | } |
1228 | 0 |
|
1229 | 0 | if (!validupdates) { |
1230 | 0 | // This can happen if the update was only valid for one table. |
1231 | 0 | return false; |
1232 | 0 | } |
1233 | 0 | |
1234 | 0 | return true; |
1235 | 0 | } |
1236 | | |
1237 | | nsCString |
1238 | | Classifier::GetProvider(const nsACString& aTableName) |
1239 | 0 | { |
1240 | 0 | nsCOMPtr<nsIUrlClassifierUtils> urlUtil = |
1241 | 0 | do_GetService(NS_URLCLASSIFIERUTILS_CONTRACTID); |
1242 | 0 |
|
1243 | 0 | nsCString provider; |
1244 | 0 | nsresult rv = urlUtil->GetProvider(aTableName, provider); |
1245 | 0 |
|
1246 | 0 | return NS_SUCCEEDED(rv) ? provider : EmptyCString(); |
1247 | 0 | } |
1248 | | |
1249 | | /* |
1250 | | * This will consume+delete updates from the passed nsTArray. |
1251 | | */ |
1252 | | nsresult |
1253 | | Classifier::UpdateHashStore(TableUpdateArray& aUpdates, |
1254 | | const nsACString& aTable) |
1255 | 0 | { |
1256 | 0 | if (nsUrlClassifierDBService::ShutdownHasStarted()) { |
1257 | 0 | return NS_ERROR_UC_UPDATE_SHUTDOWNING; |
1258 | 0 | } |
1259 | 0 | |
1260 | 0 | LOG(("Classifier::UpdateHashStore(%s)", PromiseFlatCString(aTable).get())); |
1261 | 0 |
|
1262 | 0 | HashStore store(aTable, GetProvider(aTable), mUpdatingDirectory); |
1263 | 0 |
|
1264 | 0 | if (!CheckValidUpdate(aUpdates, store.TableName())) { |
1265 | 0 | return NS_OK; |
1266 | 0 | } |
1267 | 0 | |
1268 | 0 | nsresult rv = store.Open(); |
1269 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1270 | 0 | rv = store.BeginUpdate(); |
1271 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1272 | 0 |
|
1273 | 0 | // Read the part of the store that is (only) in the cache |
1274 | 0 | RefPtr<LookupCacheV2> lookupCacheV2; |
1275 | 0 | { |
1276 | 0 | RefPtr<LookupCache> lookupCache = GetLookupCacheForUpdate(store.TableName()); |
1277 | 0 | if (lookupCache) { |
1278 | 0 | lookupCacheV2 = LookupCache::Cast<LookupCacheV2>(lookupCache); |
1279 | 0 | } |
1280 | 0 | } |
1281 | 0 | if (!lookupCacheV2) { |
1282 | 0 | return NS_ERROR_UC_UPDATE_TABLE_NOT_FOUND; |
1283 | 0 | } |
1284 | 0 | |
1285 | 0 | FallibleTArray<uint32_t> AddPrefixHashes; |
1286 | 0 | rv = lookupCacheV2->GetPrefixes(AddPrefixHashes); |
1287 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1288 | 0 | rv = store.AugmentAdds(AddPrefixHashes); |
1289 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1290 | 0 | AddPrefixHashes.Clear(); |
1291 | 0 |
|
1292 | 0 | uint32_t applied = 0; |
1293 | 0 |
|
1294 | 0 | for (uint32_t i = 0; i < aUpdates.Length(); i++) { |
1295 | 0 | RefPtr<TableUpdate> update = aUpdates[i]; |
1296 | 0 | if (!update || !update->TableName().Equals(store.TableName())) { |
1297 | 0 | continue; |
1298 | 0 | } |
1299 | 0 | |
1300 | 0 | RefPtr<TableUpdateV2> updateV2 = TableUpdate::Cast<TableUpdateV2>(update); |
1301 | 0 | NS_ENSURE_TRUE(updateV2, NS_ERROR_UC_UPDATE_UNEXPECTED_VERSION); |
1302 | 0 |
|
1303 | 0 | rv = store.ApplyUpdate(updateV2); |
1304 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1305 | 0 |
|
1306 | 0 | applied++; |
1307 | 0 |
|
1308 | 0 | LOG(("Applied update to table %s:", store.TableName().get())); |
1309 | 0 | LOG((" %d add chunks", updateV2->AddChunks().Length())); |
1310 | 0 | LOG((" %zu add prefixes", updateV2->AddPrefixes().Length())); |
1311 | 0 | LOG((" %zu add completions", updateV2->AddCompletes().Length())); |
1312 | 0 | LOG((" %d sub chunks", updateV2->SubChunks().Length())); |
1313 | 0 | LOG((" %zu sub prefixes", updateV2->SubPrefixes().Length())); |
1314 | 0 | LOG((" %zu sub completions", updateV2->SubCompletes().Length())); |
1315 | 0 | LOG((" %d add expirations", updateV2->AddExpirations().Length())); |
1316 | 0 | LOG((" %d sub expirations", updateV2->SubExpirations().Length())); |
1317 | 0 |
|
1318 | 0 | aUpdates[i] = nullptr; |
1319 | 0 | } |
1320 | 0 |
|
1321 | 0 | LOG(("Applied %d update(s) to %s.", applied, store.TableName().get())); |
1322 | 0 |
|
1323 | 0 | rv = store.Rebuild(); |
1324 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1325 | 0 |
|
1326 | 0 | LOG(("Table %s now has:", store.TableName().get())); |
1327 | 0 | LOG((" %d add chunks", store.AddChunks().Length())); |
1328 | 0 | LOG((" %zu add prefixes", store.AddPrefixes().Length())); |
1329 | 0 | LOG((" %zu add completions", store.AddCompletes().Length())); |
1330 | 0 | LOG((" %d sub chunks", store.SubChunks().Length())); |
1331 | 0 | LOG((" %zu sub prefixes", store.SubPrefixes().Length())); |
1332 | 0 | LOG((" %zu sub completions", store.SubCompletes().Length())); |
1333 | 0 |
|
1334 | 0 | rv = store.WriteFile(); |
1335 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1336 | 0 |
|
1337 | 0 | // At this point the store is updated and written out to disk, but |
1338 | 0 | // the data is still in memory. Build our quick-lookup table here. |
1339 | 0 | rv = lookupCacheV2->Build(store.AddPrefixes(), store.AddCompletes()); |
1340 | 0 | NS_ENSURE_SUCCESS(rv, NS_ERROR_UC_UPDATE_BUILD_PREFIX_FAILURE); |
1341 | 0 |
|
1342 | | #if defined(DEBUG) |
1343 | | lookupCacheV2->DumpCompletions(); |
1344 | | #endif |
1345 | 0 | rv = lookupCacheV2->WriteFile(); |
1346 | 0 | NS_ENSURE_SUCCESS(rv, NS_ERROR_UC_UPDATE_FAIL_TO_WRITE_DISK); |
1347 | 0 |
|
1348 | 0 | LOG(("Successfully updated %s", store.TableName().get())); |
1349 | 0 |
|
1350 | 0 | return NS_OK; |
1351 | 0 | } |
1352 | | |
1353 | | nsresult |
1354 | | Classifier::UpdateTableV4(TableUpdateArray& aUpdates, |
1355 | | const nsACString& aTable) |
1356 | 0 | { |
1357 | 0 | MOZ_ASSERT(!NS_IsMainThread(), |
1358 | 0 | "UpdateTableV4 must be called on the classifier worker thread."); |
1359 | 0 | if (nsUrlClassifierDBService::ShutdownHasStarted()) { |
1360 | 0 | return NS_ERROR_UC_UPDATE_SHUTDOWNING; |
1361 | 0 | } |
1362 | 0 | |
1363 | 0 | LOG(("Classifier::UpdateTableV4(%s)", PromiseFlatCString(aTable).get())); |
1364 | 0 |
|
1365 | 0 | if (!CheckValidUpdate(aUpdates, aTable)) { |
1366 | 0 | return NS_OK; |
1367 | 0 | } |
1368 | 0 | |
1369 | 0 | RefPtr<LookupCacheV4> lookupCacheV4; |
1370 | 0 | { |
1371 | 0 | RefPtr<LookupCache> lookupCache = GetLookupCacheForUpdate(aTable); |
1372 | 0 | if (lookupCache) { |
1373 | 0 | lookupCacheV4 = LookupCache::Cast<LookupCacheV4>(lookupCache); |
1374 | 0 | } |
1375 | 0 | } |
1376 | 0 | if (!lookupCacheV4) { |
1377 | 0 | return NS_ERROR_UC_UPDATE_TABLE_NOT_FOUND; |
1378 | 0 | } |
1379 | 0 | |
1380 | 0 | nsresult rv = NS_OK; |
1381 | 0 |
|
1382 | 0 | // If there are multiple updates for the same table, prefixes1 & prefixes2 |
1383 | 0 | // will act as input and output in turn to reduce memory copy overhead. |
1384 | 0 | PrefixStringMap prefixes1, prefixes2; |
1385 | 0 | PrefixStringMap* input = &prefixes1; |
1386 | 0 | PrefixStringMap* output = &prefixes2; |
1387 | 0 |
|
1388 | 0 | RefPtr<const TableUpdateV4> lastAppliedUpdate = nullptr; |
1389 | 0 | for (uint32_t i = 0; i < aUpdates.Length(); i++) { |
1390 | 0 | RefPtr<TableUpdate> update = aUpdates[i]; |
1391 | 0 | if (!update || !update->TableName().Equals(aTable)) { |
1392 | 0 | continue; |
1393 | 0 | } |
1394 | 0 | |
1395 | 0 | RefPtr<TableUpdateV4> updateV4 = TableUpdate::Cast<TableUpdateV4>(update); |
1396 | 0 | NS_ENSURE_TRUE(updateV4, NS_ERROR_UC_UPDATE_UNEXPECTED_VERSION); |
1397 | 0 |
|
1398 | 0 | if (updateV4->IsFullUpdate()) { |
1399 | 0 | input->Clear(); |
1400 | 0 | output->Clear(); |
1401 | 0 | rv = lookupCacheV4->ApplyUpdate(updateV4, *input, *output); |
1402 | 0 | if (NS_FAILED(rv)) { |
1403 | 0 | return rv; |
1404 | 0 | } |
1405 | 0 | } else { |
1406 | 0 | // If both prefix sets are empty, this means we are doing a partial update |
1407 | 0 | // without a prior full/partial update in the loop. In this case we should |
1408 | 0 | // get prefixes from the lookup cache first. |
1409 | 0 | if (prefixes1.IsEmpty() && prefixes2.IsEmpty()) { |
1410 | 0 | lookupCacheV4->GetPrefixes(prefixes1); |
1411 | 0 | } else { |
1412 | 0 | MOZ_ASSERT(prefixes1.IsEmpty() ^ prefixes2.IsEmpty()); |
1413 | 0 |
|
1414 | 0 | // When there are multiple partial updates, |input| should always point |
1415 | 0 | // to the non-empty prefix set (filled by the previous full/partial update) |
1416 | 0 | // and |output| should always point to the empty prefix set. |
1417 | 0 | input = prefixes1.IsEmpty() ? &prefixes2 : &prefixes1; |
1418 | 0 | output = prefixes1.IsEmpty() ? &prefixes1 : &prefixes2; |
1419 | 0 | } |
1420 | 0 |
|
1421 | 0 | rv = lookupCacheV4->ApplyUpdate(updateV4, *input, *output); |
1422 | 0 | if (NS_FAILED(rv)) { |
1423 | 0 | return rv; |
1424 | 0 | } |
1425 | 0 | |
1426 | 0 | input->Clear(); |
1427 | 0 | } |
1428 | 0 |
|
1429 | 0 | // Keep track of the last applied update. |
1430 | 0 | lastAppliedUpdate = updateV4; |
1431 | 0 |
|
1432 | 0 | aUpdates[i] = nullptr; |
1433 | 0 | } |
1434 | 0 |
|
1435 | 0 | rv = lookupCacheV4->Build(*output); |
1436 | 0 | NS_ENSURE_SUCCESS(rv, NS_ERROR_UC_UPDATE_BUILD_PREFIX_FAILURE); |
1437 | 0 |
|
1438 | 0 | rv = lookupCacheV4->WriteFile(); |
1439 | 0 | NS_ENSURE_SUCCESS(rv, NS_ERROR_UC_UPDATE_FAIL_TO_WRITE_DISK); |
1440 | 0 |
|
1441 | 0 | if (lastAppliedUpdate) { |
1442 | 0 | LOG(("Write meta data of the last applied update.")); |
1443 | 0 | rv = lookupCacheV4->WriteMetadata(lastAppliedUpdate); |
1444 | 0 | NS_ENSURE_SUCCESS(rv, NS_ERROR_UC_UPDATE_FAIL_TO_WRITE_DISK); |
1445 | 0 | } |
1446 | 0 |
|
1447 | 0 | LOG(("Successfully updated %s\n", PromiseFlatCString(aTable).get())); |
1448 | 0 |
|
1449 | 0 | return NS_OK; |
1450 | 0 | } |
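
Worked trace of the |prefixes1|/|prefixes2| ping-pong above for three consecutive partial updates U1..U3 (hypothetical updates for the same table):

    U1: both maps empty -> GetPrefixes() fills prefixes1; ApplyUpdate(U1, prefixes1 -> prefixes2); prefixes1 cleared
    U2: input = prefixes2, output = prefixes1; ApplyUpdate(U2, prefixes2 -> prefixes1); prefixes2 cleared
    U3: input = prefixes1, output = prefixes2; ApplyUpdate(U3, prefixes1 -> prefixes2); prefixes1 cleared
    Build(*output) then consumes prefixes2, the map written by the last update.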
1451 | | |
1452 | | nsresult |
1453 | | Classifier::UpdateCache(RefPtr<const TableUpdate> aUpdate) |
1454 | 0 | { |
1455 | 0 | if (!aUpdate) { |
1456 | 0 | return NS_OK; |
1457 | 0 | } |
1458 | 0 | |
1459 | 0 | nsAutoCString table(aUpdate->TableName()); |
1460 | 0 | LOG(("Classifier::UpdateCache(%s)", table.get())); |
1461 | 0 |
|
1462 | 0 | RefPtr<LookupCache> lookupCache = GetLookupCache(table); |
1463 | 0 | if (!lookupCache) { |
1464 | 0 | return NS_ERROR_FAILURE; |
1465 | 0 | } |
1466 | 0 | |
1467 | 0 | RefPtr<LookupCacheV2> lookupV2 = LookupCache::Cast<LookupCacheV2>(lookupCache); |
1468 | 0 | if (lookupV2) { |
1469 | 0 | RefPtr<const TableUpdateV2> updateV2 = TableUpdate::Cast<TableUpdateV2>(aUpdate); |
1470 | 0 | lookupV2->AddGethashResultToCache(updateV2->AddCompletes(), |
1471 | 0 | updateV2->MissPrefixes()); |
1472 | 0 | } else { |
1473 | 0 | RefPtr<LookupCacheV4> lookupV4 = LookupCache::Cast<LookupCacheV4>(lookupCache); |
1474 | 0 | if (!lookupV4) { |
1475 | 0 | return NS_ERROR_FAILURE; |
1476 | 0 | } |
1477 | 0 | |
1478 | 0 | RefPtr<const TableUpdateV4> updateV4 = TableUpdate::Cast<TableUpdateV4>(aUpdate); |
1479 | 0 | lookupV4->AddFullHashResponseToCache(updateV4->FullHashResponse()); |
1480 | 0 | } |
1481 | 0 |
|
1482 | | #if defined(DEBUG) |
1483 | | lookupCache->DumpCache(); |
1484 | | #endif |
1485 | |
|
1486 | 0 | return NS_OK; |
1487 | 0 | } |
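
UpdateCache() above routes completion results into the per-table lookup cache: V2 tables cache gethash results (add completes plus miss prefixes), while V4 tables cache the full-hash response. Conceptually this is a prefix-keyed positive/negative cache with expiration; the sketch below shows only that idea, with hypothetical types (ResultCache, CachedEntry), not the actual LookupCacheV2/V4 internals.

    #include <cstdint>
    #include <ctime>
    #include <string>
    #include <unordered_map>
    #include <vector>

    // Hypothetical, simplified model of a completion-result cache keyed by
    // a 4-byte prefix. Not the real LookupCache cache structures.
    struct CachedEntry {
      std::vector<std::string> completions;  // full hashes ("positive" hits)
      std::time_t negativeExpiry = 0;        // "known not listed" until this time
    };

    class ResultCache {
     public:
      void AddCompletion(uint32_t prefix, std::string fullHash) {
        mEntries[prefix].completions.push_back(std::move(fullHash));
      }
      void AddMiss(uint32_t prefix, std::time_t expiry) {
        mEntries[prefix].negativeExpiry = expiry;
      }
      // Returns true if a lookup for |prefix| can be answered from the cache
      // without issuing another completion request.
      bool IsFresh(uint32_t prefix, std::time_t now) const {
        auto it = mEntries.find(prefix);
        return it != mEntries.end() &&
               (!it->second.completions.empty() ||
                it->second.negativeExpiry > now);
      }

     private:
      std::unordered_map<uint32_t, CachedEntry> mEntries;
    };
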
1488 | | |
1489 | | RefPtr<LookupCache> |
1490 | | Classifier::GetLookupCache(const nsACString& aTable, bool aForUpdate) |
1491 | 0 | { |
1492 | 0 | // GetLookupCache(aForUpdate==true) can only be called on update thread. |
1493 | 0 | MOZ_ASSERT_IF(aForUpdate, NS_GetCurrentThread() == mUpdateThread); |
1494 | 0 |
|
1495 | 0 | LookupCacheArray& lookupCaches = aForUpdate ? mNewLookupCaches |
1496 | 0 | : mLookupCaches; |
1497 | 0 | auto& rootStoreDirectory = aForUpdate ? mUpdatingDirectory |
1498 | 0 | : mRootStoreDirectory; |
1499 | 0 |
|
1500 | 0 | for (auto c: lookupCaches) { |
1501 | 0 | if (c->TableName().Equals(aTable)) { |
1502 | 0 | return c; |
1503 | 0 | } |
1504 | 0 | } |
1505 | 0 |
|
1506 | 0 | // We don't want to create a lookup cache when shutdown is already happening.
1507 | 0 | if (nsUrlClassifierDBService::ShutdownHasStarted()) { |
1508 | 0 | return nullptr; |
1509 | 0 | } |
1510 | 0 | |
1511 | 0 | // TODO: Bug 1302600 - It would be better to have a more general, non-main-thread
1512 | 0 | // method to convert a table name to a protocol version. Currently we can only
1513 | 0 | // know this by checking whether the table name ends with '-proto'.
1514 | 0 | RefPtr<LookupCache> cache; |
1515 | 0 | nsCString provider = GetProvider(aTable); |
1516 | 0 | if (StringEndsWith(aTable, NS_LITERAL_CSTRING("-proto"))) { |
1517 | 0 | cache = new LookupCacheV4(aTable, provider, rootStoreDirectory); |
1518 | 0 | } else { |
1519 | 0 | cache = new LookupCacheV2(aTable, provider, rootStoreDirectory); |
1520 | 0 | } |
1521 | 0 |
|
1522 | 0 | nsresult rv = cache->Init(); |
1523 | 0 | if (NS_FAILED(rv)) { |
1524 | 0 | return nullptr; |
1525 | 0 | } |
1526 | 0 | rv = cache->Open(); |
1527 | 0 | if (NS_SUCCEEDED(rv)) { |
1528 | 0 | lookupCaches.AppendElement(cache); |
1529 | 0 | return cache; |
1530 | 0 | } |
1531 | 0 | |
1532 | 0 | // At this point we failed to open LookupCache. |
1533 | 0 | // |
1534 | 0 | // GetLookupCache for updates and for other usage runs on the update thread
1535 | 0 | // and the worker thread respectively (Bug 1339760). Cleaning up only within
1536 | 0 | // each thread's own realm potentially increases concurrency.
1537 | 0 | |
1538 | 0 | if (aForUpdate) { |
1539 | 0 | // Remove the update intermediaries regardless of whether the failure was due to file corruption.
1540 | 0 | RemoveUpdateIntermediaries(); |
1541 | 0 | return nullptr; |
1542 | 0 | } |
1543 | 0 | |
1544 | 0 | // Non-update case. |
1545 | 0 | if (rv == NS_ERROR_FILE_CORRUPTED) { |
1546 | 0 | Reset(); // Not including the update intermediaries. |
1547 | 0 | } |
1548 | 0 | return nullptr; |
1549 | 0 | } |
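
GetLookupCache() above decides the protocol version purely from the table name: names ending in '-proto' get a LookupCacheV4, everything else a LookupCacheV2, and a cache that fails to Open() is never added to the array (the update path drops its intermediaries; the lookup path resets only on corruption). A hedged sketch of just the name-based selection follows; Cache, CacheV2, CacheV4 and MakeCacheForTable are illustrative placeholders, not the real classes.

    #include <memory>
    #include <string>

    // Illustrative placeholder interfaces; the real code returns a
    // RefPtr<LookupCache> backed by LookupCacheV2 or LookupCacheV4.
    struct Cache { virtual ~Cache() = default; };
    struct CacheV2 : Cache {};
    struct CacheV4 : Cache {};

    static bool EndsWith(const std::string& s, const std::string& suffix) {
      return s.size() >= suffix.size() &&
             s.compare(s.size() - suffix.size(), suffix.size(), suffix) == 0;
    }

    // Chooses the cache implementation from the table name alone, mirroring
    // the '-proto' convention used above (see Bug 1302600 for the caveat).
    static std::unique_ptr<Cache> MakeCacheForTable(const std::string& tableName) {
      if (EndsWith(tableName, "-proto")) {
        return std::make_unique<CacheV4>();  // Safe Browsing V4 table
      }
      return std::make_unique<CacheV2>();    // legacy V2 table
    }
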
1550 | | |
1551 | | nsresult |
1552 | | Classifier::ReadNoiseEntries(const Prefix& aPrefix, |
1553 | | const nsACString& aTableName, |
1554 | | uint32_t aCount, |
1555 | | PrefixArray& aNoiseEntries) |
1556 | 0 | { |
1557 | 0 | FallibleTArray<uint32_t> prefixes; |
1558 | 0 | nsresult rv; |
1559 | 0 |
|
1560 | 0 | RefPtr<LookupCache> cache = GetLookupCache(aTableName); |
1561 | 0 | if (!cache) { |
1562 | 0 | return NS_ERROR_FAILURE; |
1563 | 0 | } |
1564 | 0 | |
1565 | 0 | RefPtr<LookupCacheV2> cacheV2 = LookupCache::Cast<LookupCacheV2>(cache); |
1566 | 0 | if (cacheV2) { |
1567 | 0 | rv = cacheV2->GetPrefixes(prefixes); |
1568 | 0 | } else { |
1569 | 0 | rv = LookupCache::Cast<LookupCacheV4>(cache)->GetFixedLengthPrefixes(prefixes); |
1570 | 0 | } |
1571 | 0 |
|
1572 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1573 | 0 |
|
1574 | 0 | if (prefixes.Length() == 0) { |
1575 | 0 | NS_WARNING("Could not find prefix in PrefixSet during noise lookup"); |
1576 | 0 | return NS_ERROR_FAILURE; |
1577 | 0 | } |
1578 | 0 |
|
1579 | 0 | // We do not want to simply pick random prefixes, because this would allow |
1580 | 0 | // averaging out the noise by analysing the traffic from Firefox users. |
1581 | 0 | // Instead, we ensure the 'noise' is the same for the same prefix by seeding |
1582 | 0 | // the random number generator with the prefix. We prefer not to use rand() |
1583 | 0 | // which isn't thread safe, and the reseeding of which could trip up other |
1584 | 0 | // parts of the code that expect actual random numbers.
1585 | 0 | // Here we use a simple LCG (Linear Congruential Generator) to generate |
1586 | 0 | // random numbers. We seed the LCG with the prefix we are generating noise |
1587 | 0 | // for. |
1588 | 0 | // http://en.wikipedia.org/wiki/Linear_congruential_generator |
1589 | 0 |
|
1590 | 0 | uint32_t m = prefixes.Length(); |
1591 | 0 | uint32_t a = aCount % m; |
1592 | 0 | uint32_t idx = aPrefix.ToUint32() % m; |
1593 | 0 |
|
1594 | 0 | for (size_t i = 0; i < aCount; i++) { |
1595 | 0 | idx = (a * idx + a) % m; |
1596 | 0 |
|
1597 | 0 | Prefix newPrefix; |
1598 | 0 | uint32_t hash = prefixes[idx]; |
1599 | 0 | // In the V4 case on little-endian platforms, we swapped the byte order when
1600 | 0 | // converting from char* to int; swap it back so we send the hex string correctly.
1601 | 0 | // See https://bugzilla.mozilla.org/show_bug.cgi?id=1283007#c23 |
1602 | 0 | if (!cacheV2 && !bool(MOZ_BIG_ENDIAN)) { |
1603 | 0 | hash = NativeEndian::swapFromBigEndian(prefixes[idx]); |
1604 | 0 | } |
1605 | 0 |
|
1606 | 0 | newPrefix.FromUint32(hash); |
1607 | 0 | if (newPrefix != aPrefix) { |
1608 | 0 | aNoiseEntries.AppendElement(newPrefix); |
1609 | 0 | } |
1610 | 0 | } |
1611 | 0 |
|
1612 | 0 | return NS_OK; |
1613 | 0 | } |
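
ReadNoiseEntries() above derives its "random" indices deterministically from the prefix being looked up, so the same prefix always yields the same noise and the noise cannot be averaged out by observing traffic; on little-endian machines it also swaps V4 prefixes back to the wire byte order before use. Below is a stand-alone sketch of the same index walk over a prefix array; PickNoiseIndices is a hypothetical name, and the recurrence mirrors the simple LCG-style step used above rather than a textbook LCG.

    #include <cstdint>
    #include <vector>

    // Deterministically picks |count| indices into a prefix array, seeded by
    // the prefix we are generating noise for (same prefix -> same indices).
    // Mirrors the recurrence idx = (a * idx + a) % m used above; 'a' and the
    // seed come from the lookup itself, so no global RNG state is touched.
    static std::vector<size_t> PickNoiseIndices(uint32_t seedPrefix,
                                                size_t count,
                                                size_t prefixArrayLength)
    {
      std::vector<size_t> indices;
      if (prefixArrayLength == 0) {
        return indices;
      }
      const uint64_t m = prefixArrayLength;
      const uint64_t a = count % m;
      uint64_t idx = seedPrefix % m;

      for (size_t i = 0; i < count; i++) {
        idx = (a * idx + a) % m;
        indices.push_back(static_cast<size_t>(idx));
      }
      return indices;
    }
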
1614 | | |
1615 | | nsresult |
1616 | | Classifier::LoadMetadata(nsIFile* aDirectory, nsACString& aResult) |
1617 | 0 | { |
1618 | 0 | nsCOMPtr<nsIDirectoryEnumerator> entries; |
1619 | 0 | nsresult rv = aDirectory->GetDirectoryEntries(getter_AddRefs(entries)); |
1620 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1621 | 0 | NS_ENSURE_ARG_POINTER(entries); |
1622 | 0 |
|
1623 | 0 | nsCOMPtr<nsIFile> file; |
1624 | 0 | while (NS_SUCCEEDED(rv = entries->GetNextFile(getter_AddRefs(file))) && file) { |
1625 | 0 | // If |file| is a directory, recurse to find its entries as well. |
1626 | 0 | bool isDirectory; |
1627 | 0 | if (NS_FAILED(file->IsDirectory(&isDirectory))) { |
1628 | 0 | continue; |
1629 | 0 | } |
1630 | 0 | if (isDirectory) { |
1631 | 0 | LoadMetadata(file, aResult); |
1632 | 0 | continue; |
1633 | 0 | } |
1634 | 0 | |
1635 | 0 | // Truncate file extension to get the table name. |
1636 | 0 | nsCString tableName; |
1637 | 0 | rv = file->GetNativeLeafName(tableName); |
1638 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1639 | 0 |
|
1640 | 0 | int32_t dot = tableName.RFind(METADATA_SUFFIX); |
1641 | 0 | if (dot == -1) { |
1642 | 0 | continue; |
1643 | 0 | } |
1644 | 0 | tableName.Cut(dot, METADATA_SUFFIX.Length()); |
1645 | 0 |
|
1646 | 0 | RefPtr<LookupCacheV4> lookupCacheV4; |
1647 | 0 | { |
1648 | 0 | RefPtr<LookupCache> lookupCache = GetLookupCache(tableName); |
1649 | 0 | if (lookupCache) { |
1650 | 0 | lookupCacheV4 = LookupCache::Cast<LookupCacheV4>(lookupCache); |
1651 | 0 | } |
1652 | 0 | } |
1653 | 0 | if (!lookupCacheV4) { |
1654 | 0 | continue; |
1655 | 0 | } |
1656 | 0 | |
1657 | 0 | nsCString state; |
1658 | 0 | nsCString checksum; |
1659 | 0 | rv = lookupCacheV4->LoadMetadata(state, checksum); |
1660 | 0 | if (NS_FAILED(rv)) { |
1661 | 0 | LOG(("Failed to get metadata for table %s", tableName.get())); |
1662 | 0 | continue; |
1663 | 0 | } |
1664 | 0 |
|
1665 | 0 | // The state might include '\n', so we have to encode it.
1666 | 0 | nsAutoCString stateBase64; |
1667 | 0 | rv = Base64Encode(state, stateBase64); |
1668 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1669 | 0 |
|
1670 | 0 | nsAutoCString checksumBase64; |
1671 | 0 | rv = Base64Encode(checksum, checksumBase64); |
1672 | 0 | NS_ENSURE_SUCCESS(rv, rv); |
1673 | 0 |
|
1674 | 0 | LOG(("Appending state '%s' and checksum '%s' for table %s", |
1675 | 0 | stateBase64.get(), checksumBase64.get(), tableName.get())); |
1676 | 0 |
|
1677 | 0 | aResult.AppendPrintf("%s;%s:%s\n", tableName.get(), |
1678 | 0 | stateBase64.get(), |
1679 | 0 | checksumBase64.get()); |
1680 | 0 | } |
1681 | 0 |
|
1682 | 0 | return rv; |
1683 | 0 | } |
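
LoadMetadata() above aggregates one line per V4 table into aResult using the format "tableName;stateBase64:checksumBase64\n", with both fields Base64-encoded because the raw state may contain newlines. The sketch below shows splitting one such line back into its three fields; ParseMetadataLine is a hypothetical helper for illustration only and the real consumer of this string lives outside this file.

    #include <string>

    // Splits "tableName;stateBase64:checksumBase64" into its three fields.
    // Returns false if the line does not match the expected shape.
    static bool ParseMetadataLine(const std::string& line,
                                  std::string& tableName,
                                  std::string& stateBase64,
                                  std::string& checksumBase64)
    {
      const size_t semi = line.find(';');
      if (semi == std::string::npos) {
        return false;
      }
      const size_t colon = line.find(':', semi + 1);
      if (colon == std::string::npos) {
        return false;
      }
      tableName = line.substr(0, semi);
      stateBase64 = line.substr(semi + 1, colon - semi - 1);
      checksumBase64 = line.substr(colon + 1);
      return true;
    }
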
1684 | | |
1685 | | } // namespace safebrowsing |
1686 | | } // namespace mozilla |