2014-06-24 00:42:43 +02:00
|
|
|
/*******************************************************************************
|
|
|
|
|
2015-12-29 17:34:41 +01:00
|
|
|
uBlock Origin - a browser extension to block requests.
|
2018-12-14 17:01:21 +01:00
|
|
|
Copyright (C) 2014-present Raymond Hill
|
2014-06-24 00:42:43 +02:00
|
|
|
|
|
|
|
This program is free software: you can redistribute it and/or modify
|
|
|
|
it under the terms of the GNU General Public License as published by
|
|
|
|
the Free Software Foundation, either version 3 of the License, or
|
|
|
|
(at your option) any later version.
|
|
|
|
|
|
|
|
This program is distributed in the hope that it will be useful,
|
|
|
|
but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
GNU General Public License for more details.
|
|
|
|
|
|
|
|
You should have received a copy of the GNU General Public License
|
|
|
|
along with this program. If not, see {http://www.gnu.org/licenses/}.
|
|
|
|
|
|
|
|
Home: https://github.com/gorhill/uBlock
|
|
|
|
*/
|
|
|
|
|
2014-10-19 13:11:27 +02:00
|
|
|
'use strict';
|
2014-06-24 00:42:43 +02:00
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
2015-12-29 17:34:41 +01:00
|
|
|
// A standalone URL tokenizer will allow us to use URL tokens in more than
|
|
|
|
// just static filtering engine. This opens the door to optimize other
|
|
|
|
// filtering engine parts aside static filtering. This also allows:
|
|
|
|
// - Tokenize only on demand.
|
|
|
|
// - To potentially avoid tokenizing when same URL is fed to tokenizer.
|
|
|
|
// - Benchmarking shows this to be a common occurrence.
|
2017-05-19 14:45:19 +02:00
|
|
|
//
|
|
|
|
// https://github.com/gorhill/uBlock/issues/2630
|
2017-05-20 02:22:26 +02:00
|
|
|
// Slice input URL into a list of safe-integer token values, instead of a list
|
2017-05-19 14:45:19 +02:00
|
|
|
// of substrings. The assumption is that with dealing only with numeric
|
|
|
|
// values, less underlying memory allocations, and also as a consequence
|
|
|
|
// less work for the garbage collector down the road.
|
|
|
|
// Another assumption is that using a numeric-based key value for Map() is
|
|
|
|
// more efficient than string-based key value (but that is something I would
|
|
|
|
// have to benchmark).
|
2017-05-20 02:22:26 +02:00
|
|
|
// Benchmark for string-based tokens vs. safe-integer token values:
|
2017-05-19 14:45:19 +02:00
|
|
|
// https://gorhill.github.io/obj-vs-set-vs-map/tokenize-to-str-vs-to-int.html
|
2015-12-29 17:34:41 +01:00
|
|
|
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
µBlock.urlTokenizer = new (class {
|
|
|
|
constructor() {
|
|
|
|
this._chars = '0123456789%abcdefghijklmnopqrstuvwxyz';
|
|
|
|
this._validTokenChars = new Uint8Array(128);
|
|
|
|
for ( let i = 0, n = this._chars.length; i < n; i++ ) {
|
|
|
|
this._validTokenChars[this._chars.charCodeAt(i)] = i + 1;
|
|
|
|
}
|
|
|
|
|
2019-04-28 16:15:15 +02:00
|
|
|
// Four upper bits of token hash are reserved for built-in predefined
|
|
|
|
// token hashes, which should never end up being used when tokenizing
|
|
|
|
// any arbitrary string.
|
|
|
|
this.dotTokenHash = 0x10000000;
|
|
|
|
this.anyTokenHash = 0x20000000;
|
|
|
|
this.anyHTTPSTokenHash = 0x30000000;
|
|
|
|
this.anyHTTPTokenHash = 0x40000000;
|
|
|
|
this.noTokenHash = 0x50000000;
|
|
|
|
this.emptyTokenHash = 0xF0000000;
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
|
|
|
|
this._urlIn = '';
|
|
|
|
this._urlOut = '';
|
|
|
|
this._tokenized = false;
|
|
|
|
this._tokens = [ 0 ];
|
2019-04-26 23:14:00 +02:00
|
|
|
|
|
|
|
this.knownTokens = new Uint8Array(65536);
|
|
|
|
this.resetKnownTokens();
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
}
|
|
|
|
|
|
|
|
setURL(url) {
|
2015-12-29 17:34:41 +01:00
|
|
|
if ( url !== this._urlIn ) {
|
|
|
|
this._urlIn = url;
|
|
|
|
this._urlOut = url.toLowerCase();
|
|
|
|
this._tokenized = false;
|
|
|
|
}
|
|
|
|
return this._urlOut;
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
}
|
2015-12-29 17:34:41 +01:00
|
|
|
|
2019-04-26 23:14:00 +02:00
|
|
|
resetKnownTokens() {
|
|
|
|
this.knownTokens.fill(0);
|
Increase resolution of known-token lookup table
Related commit:
- https://github.com/gorhill/uBlock/commit/69a43e07c4bc017f3320a669c1e80147c17dddcf
Using 32 bits of token hash rather than just the 16 lower
bits does help discard more unknown tokens.
Using the default filter lists, the known-token lookup
table is populated by 12,276 entries, out of 65,536, thus
making the case that theoretically there is a lot of
possible tokens which can be discarded.
In practice, running the built-in
staticNetFilteringEngine.benchmark() with default filter
lists, I find that 1,518,929 tokens were skipped out of
4,441,891 extracted tokens, or 34%.
2019-04-27 14:18:01 +02:00
|
|
|
this.addKnownToken(this.dotTokenHash);
|
|
|
|
this.addKnownToken(this.anyTokenHash);
|
|
|
|
this.addKnownToken(this.anyHTTPSTokenHash);
|
|
|
|
this.addKnownToken(this.anyHTTPTokenHash);
|
|
|
|
this.addKnownToken(this.noTokenHash);
|
|
|
|
}
|
|
|
|
|
|
|
|
addKnownToken(th) {
|
|
|
|
this.knownTokens[th & 0xFFFF ^ th >>> 16] = 1;
|
2019-04-26 23:14:00 +02:00
|
|
|
}
|
|
|
|
|
2015-12-29 17:34:41 +01:00
|
|
|
// Tokenize on demand.
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
getTokens() {
|
|
|
|
if ( this._tokenized ) { return this._tokens; }
|
|
|
|
let i = this._tokenize();
|
|
|
|
i = this._appendTokenAt(i, this.anyTokenHash, 0);
|
|
|
|
if ( this._urlOut.startsWith('https://') ) {
|
|
|
|
i = this._appendTokenAt(i, this.anyHTTPSTokenHash, 0);
|
|
|
|
} else if ( this._urlOut.startsWith('http://') ) {
|
|
|
|
i = this._appendTokenAt(i, this.anyHTTPTokenHash, 0);
|
2015-12-29 17:34:41 +01:00
|
|
|
}
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
i = this._appendTokenAt(i, this.noTokenHash, 0);
|
|
|
|
this._tokens[i] = 0;
|
|
|
|
this._tokenized = true;
|
2015-12-29 17:34:41 +01:00
|
|
|
return this._tokens;
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
}
|
2015-12-29 17:34:41 +01:00
|
|
|
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
tokenHashFromString(s) {
|
|
|
|
const l = s.length;
|
2019-04-28 16:15:15 +02:00
|
|
|
if ( l === 0 ) { return this.emptyTokenHash; }
|
|
|
|
const vtc = this._validTokenChars;
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
let th = vtc[s.charCodeAt(0)];
|
2019-04-28 16:15:15 +02:00
|
|
|
for ( let i = 1; i !== 7 && i !== l; i++ ) {
|
|
|
|
th = th << 4 ^ vtc[s.charCodeAt(i)];
|
2017-05-19 14:45:19 +02:00
|
|
|
}
|
|
|
|
return th;
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
}
|
2015-12-29 17:34:41 +01:00
|
|
|
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
stringFromTokenHash(th) {
|
2019-04-15 17:45:33 +02:00
|
|
|
if ( th === 0 ) { return ''; }
|
2019-04-28 16:15:15 +02:00
|
|
|
return th.toString(16);
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
}
|
2019-04-15 17:45:33 +02:00
|
|
|
|
2019-04-26 23:14:00 +02:00
|
|
|
toSelfie() {
|
|
|
|
return µBlock.base64.encode(
|
|
|
|
this.knownTokens.buffer,
|
|
|
|
this.knownTokens.byteLength
|
|
|
|
);
|
|
|
|
}
|
|
|
|
|
|
|
|
fromSelfie(selfie) {
|
|
|
|
return µBlock.base64.decode(selfie, this.knownTokens.buffer);
|
|
|
|
}
|
|
|
|
|
2017-05-26 14:31:19 +02:00
|
|
|
// https://github.com/chrisaljoudi/uBlock/issues/1118
|
|
|
|
// We limit to a maximum number of tokens.
|
|
|
|
|
2019-04-26 23:14:00 +02:00
|
|
|
_appendTokenAt(i, th, ti) {
|
|
|
|
this._tokens[i+0] = th;
|
|
|
|
this._tokens[i+1] = ti;
|
|
|
|
return i + 2;
|
|
|
|
}
|
|
|
|
|
Add HNTrie-based filter classes to store origin-only filters
Related issue:
- https://github.com/uBlockOrigin/uBlock-issues/issues/528#issuecomment-484408622
Following STrie-related work in above issue, I noticed that a large
number of filters in EasyList were filters which only had to match
against the document origin. For instance, among just the top 10
most populous buckets, there were four such buckets with over
hundreds of entries each:
- bits: 72, token: "http", 146 entries
- bits: 72, token: "https", 139 entries
- bits: 88, token: "http", 122 entries
- bits: 88, token: "https", 118 entries
These filters in these buckets have to be matched against all
the network requests.
In order to leverage HNTrie for these filters[1], they are now handled
in a special way so as to ensure they all end up in a single HNTrie
(per bucket), which means that instead of scanning hundreds of entries
per URL, there is now a single scan per bucket per URL for these
apply-everywhere filters.
Now, any filter which fulfill ALL the following condition will be
processed in a special manner internally:
- Is of the form `|https://` or `|http://` or `*`; and
- Does have a `domain=` option; and
- Does not have a negated domain in its `domain=` option; and
- Does not have `csp=` option; and
- Does not have a `redirect=` option
If a filter does not fulfill ALL the conditions above, no change
in behavior.
A filter which matches ALL of the above will be processed in a special
manner:
- The `domain=` option will be decomposed so as to create as many
distinct filter as there is distinct value in the `domain=` option
- This also apply to the `badfilter` version of the filter, which
means it now become possible to `badfilter` only one of the
distinct filter without having to `badfilter` all of them.
- The logger will always report these special filters with only a
single hostname in the `domain=` option.
***
[1] HNTrie is currently WASM-ed on Firefox.
2019-04-19 22:33:46 +02:00
|
|
|
// Slice the current URL (this._urlOut) into tokens, writing
// [hash, start-position] pairs into this._tokens.
//
// Returns the number of slots filled in this._tokens (2 per token) on
// the normal path, or this.emptyTokenHash when the URL is empty.
// NOTE(review): the empty-URL path returns a hash value while every
// other path returns a pair count -- presumably callers special-case
// the empty URL; confirm against the call sites.
_tokenize() {
    const tokens = this._tokens;
    let url = this._urlOut;
    let l = url.length;
    if ( l === 0 ) { return this.emptyTokenHash; }
    // Cap the amount of work done on pathologically long URLs.
    if ( l > 2048 ) {
        url = url.slice(0, 2048);
        l = 2048;
    }
    const knownTokens = this.knownTokens;
    const vtc = this._validTokenChars;
    // i: read position in url; j: write position in tokens;
    // v: mapped value of current character; n: characters folded into
    // the hash so far; ti: token start position; th: token hash.
    let i = 0, j = 0, v, n, ti, th;
    for (;;) {
        // Skip over non-token characters (vtc maps valid token
        // character codes to non-zero values, everything else to 0).
        for (;;) {
            if ( i === l ) { return j; }
            v = vtc[url.charCodeAt(i++)];
            if ( v !== 0 ) { break; }
        }
        th = v; ti = i - 1; n = 1;
        // Consume the remainder of the token. Only the first 7
        // characters contribute to the hash; longer tokens are still
        // fully consumed but their tail is ignored.
        for (;;) {
            if ( i === l ) { break; }
            v = vtc[url.charCodeAt(i++)];
            if ( v === 0 ) { break; }
            if ( n === 7 ) { continue; }
            th = th << 4 ^ v;
            n += 1;
        }
        // Record the token only if its 16-bit folded hash appears in
        // the known-token table: tokens which cannot possibly match any
        // filter are discarded up front.
        if ( knownTokens[th & 0xFFFF ^ th >>> 16] !== 0 ) {
            tokens[j+0] = th;
            tokens[j+1] = ti;
            j += 2;
        }
    }
}
|
|
|
|
})();
|
2014-06-24 00:42:43 +02:00
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
2015-12-29 17:34:41 +01:00
|
|
|
// Render a count as a short, human-friendly figure: exact below 1000,
// then approximations such as ">1k", "12k", "123k", "1M", "12M".
// Non-number input yields an empty string.
µBlock.formatCount = function(count) {
    if ( typeof count !== 'number' ) { return ''; }
    const fixed = count.toFixed(0);
    // Negated comparison so that NaN also takes the plain path.
    if ( count >= 1000 === false ) { return fixed; }
    if ( count < 10000 ) { return '>' + fixed.slice(0,1) + 'k'; }
    if ( count < 100000 ) { return fixed.slice(0,2) + 'k'; }
    if ( count < 1000000 ) { return fixed.slice(0,3) + 'k'; }
    if ( count < 10000000 ) { return fixed.slice(0,1) + 'M'; }
    return fixed.slice(0,-6) + 'M';
};
|
|
|
|
|
2014-08-20 15:24:16 +02:00
|
|
|
// https://www.youtube.com/watch?v=DyvzfyqYm_s
|
2014-08-20 02:41:52 +02:00
|
|
|
|
|
|
|
/******************************************************************************/
|
2016-08-13 22:42:58 +02:00
|
|
|
|
2016-10-13 19:25:57 +02:00
|
|
|
// Current local date/time as a filename-friendly string, e.g.
// "2019-04-28_16.15.15": ISO-8601 shifted to local time, with the
// fractional-seconds/Z suffix removed, ":" replaced by "." and "T"
// by "_".
µBlock.dateNowToSensibleString = function() {
    const tzOffsetMs = (new Date()).getTimezoneOffset() * 60000;
    const local = new Date(Date.now() - tzOffsetMs);
    const iso = local.toISOString().replace(/\.\d+Z$/, '');
    return iso.replace(/:/g, '.').replace('T', '_');
};
|
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
2016-08-13 22:42:58 +02:00
|
|
|
// Iterate over the lines of a text, optionally starting at `offset`.
// Cheaper than splitting the whole text up front.
µBlock.LineIterator = function(text, offset) {
    this.text = text;               // text being iterated
    this.textLen = text.length;     // cached length, used by eot()
    this.offset = offset || 0;      // position of the next line
};
|
|
|
|
|
2017-05-12 16:35:11 +02:00
|
|
|
// Return the next line (without its terminator) and advance past it.
// An optional `offset` argument first skips that many characters.
// A line ends at '\n', or '\r' as fallback, or at end of text.
µBlock.LineIterator.prototype.next = function(offset) {
    if ( offset !== undefined ) {
        this.offset += offset;
    }
    let end = this.text.indexOf('\n', this.offset);
    if ( end === -1 ) {
        end = this.text.indexOf('\r', this.offset);
    }
    if ( end === -1 ) {
        end = this.textLen;
    }
    const line = this.text.slice(this.offset, end);
    this.offset = end + 1;
    return line;
};
|
|
|
|
|
2017-05-12 16:35:11 +02:00
|
|
|
// Char code at `offset` relative to the current iterator position.
µBlock.LineIterator.prototype.charCodeAt = function(offset) {
    const pos = this.offset + offset;
    return this.text.charCodeAt(pos);
};
|
|
|
|
|
2016-08-13 22:42:58 +02:00
|
|
|
// Whether the iterator has reached the end of the text.
µBlock.LineIterator.prototype.eot = function() {
    const exhausted = this.offset >= this.textLen;
    return exhausted;
};
|
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
|
|
|
// The field iterator is less CPU-intensive than when using native
|
|
|
|
// String.split().
|
|
|
|
|
|
|
|
// Iterate over separator-delimited fields of a text. Less
// CPU-intensive than native String.split().
µBlock.FieldIterator = function(sep) {
    this.text = '';             // text being iterated, set by first()
    this.sep = sep;             // field separator
    this.sepLen = sep.length;   // cached separator length
    this.offset = 0;            // position of the next field
};
|
|
|
|
|
|
|
|
// Start iterating over `text` and return its first field.
µBlock.FieldIterator.prototype.first = function(text) {
    this.offset = 0;
    this.text = text;
    return this.next();
};
|
|
|
|
|
|
|
|
// Return the next field and advance past the separator. When no
// separator remains, the rest of the text is the last field.
µBlock.FieldIterator.prototype.next = function() {
    const sepPos = this.text.indexOf(this.sep, this.offset);
    const end = sepPos !== -1 ? sepPos : this.text.length;
    const field = this.text.slice(this.offset, end);
    this.offset = end + this.sepLen;
    return field;
};
|
|
|
|
|
2017-05-12 16:35:11 +02:00
|
|
|
// Return all remaining text, from the current position to the end.
µBlock.FieldIterator.prototype.remainder = function() {
    const rest = this.text.slice(this.offset);
    return rest;
};
|
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
2018-10-23 19:01:08 +02:00
|
|
|
// Helpers to serialize/unserialize compiled filter lists to/from a
// single string. Lines are grouped into blocks; in serialized form
// each block is bracketed by "#block-start-<id>" and "#block-end-<id>"
// marker lines.
µBlock.CompiledLineIO = {
    serialize: JSON.stringify,
    unserialize: JSON.parse,
    blockStartPrefix: '#block-start-', // ensure no special regex characters
    blockEndPrefix: '#block-end-', // ensure no special regex characters

    // Accumulates serialized lines into per-id blocks; methods live on
    // Writer.prototype (push/select/toString).
    Writer: function() {
        this.io = µBlock.CompiledLineIO;
        this.blockId = undefined;       // id of the currently selected block
        this.block = undefined;         // lines array of the selected block
        this.stringifier = this.io.serialize;
        this.blocks = new Map();        // block id -> array of lines
        this.properties = new Map();
    },

    // Splits `raw` (as produced by Writer.toString()) back into blocks,
    // optionally pre-selecting `blockId` for reading.
    Reader: function(raw, blockId) {
        this.io = µBlock.CompiledLineIO;
        this.block = '';                // text of the selected block
        this.len = 0;                   // length of the selected block
        this.offset = 0;                // read position within the block
        this.line = '';                 // current line
        this.parser = this.io.unserialize;
        this.blocks = new Map();        // block id -> block text
        this.properties = new Map();
        // Find each start marker, locate the matching end marker, and
        // store the text in between. Resuming the scan from the end
        // marker relies on it being present; well-formed input (output
        // of Writer.toString()) always contains it.
        let reBlockStart = new RegExp(
            '^' + this.io.blockStartPrefix + '(\\d+)\\n',
            'gm'
        );
        let match = reBlockStart.exec(raw);
        while ( match !== null ) {
            let beg = match.index + match[0].length;
            let end = raw.indexOf(this.io.blockEndPrefix + match[1], beg);
            this.blocks.set(parseInt(match[1], 10), raw.slice(beg, end));
            reBlockStart.lastIndex = end;
            match = reBlockStart.exec(raw);
        }
        if ( blockId !== undefined ) {
            this.select(blockId);
        }
    }
};
|
|
|
|
|
2018-10-23 19:01:08 +02:00
|
|
|
µBlock.CompiledLineIO.Writer.prototype = {
    // Append one serialized entry to the currently selected block.
    push: function(args) {
        this.block.push(this.stringifier(args));
    },
    // Make `blockId` the current block, creating it on first use.
    select: function(blockId) {
        if ( blockId === this.blockId ) { return; }
        this.blockId = blockId;
        let block = this.blocks.get(blockId);
        if ( block === undefined ) {
            block = [];
            this.blocks.set(blockId, block);
        }
        this.block = block;
    },
    // Serialize all non-empty blocks into a single string, each block
    // wrapped in its start/end marker lines.
    toString: function() {
        const out = [];
        for ( const [ id, lines ] of this.blocks ) {
            if ( lines.length === 0 ) { continue; }
            out.push(this.io.blockStartPrefix + id);
            out.push(lines.join('\n'));
            out.push(this.io.blockEndPrefix + id);
        }
        return out.join('\n');
    }
};
|
|
|
|
|
2018-10-23 19:01:08 +02:00
|
|
|
µBlock.CompiledLineIO.Reader.prototype = {
    // Advance to the next line of the selected block. Returns false
    // once the block is exhausted; the current line is then ''.
    next: function() {
        if ( this.offset === this.len ) {
            this.line = '';
            return false;
        }
        const eol = this.block.indexOf('\n', this.offset);
        if ( eol === -1 ) {
            this.line = this.block.slice(this.offset);
            this.offset = this.len;
        } else {
            this.line = this.block.slice(this.offset, eol);
            this.offset = eol + 1;
        }
        return true;
    },
    // Select which block to read from; unknown ids yield an empty one.
    select: function(blockId) {
        this.block = this.blocks.get(blockId) || '';
        this.len = this.block.length;
        this.offset = 0;
        return this;
    },
    // Raw text of the current line, usable as a cheap fingerprint.
    fingerprint: function() {
        return this.line;
    },
    // Unserialized arguments of the current line.
    args: function() {
        return this.parser(this.line);
    }
};
|
|
|
|
|
2016-09-12 16:22:25 +02:00
|
|
|
/******************************************************************************/
|
|
|
|
|
2016-09-16 23:41:17 +02:00
|
|
|
// Open a new tab as described by `details`. The logger page gets
// special treatment: shift-click toggles the "always detach" user
// setting, and when detached the logger is opened as a popup window
// restored to its last saved geometry.
µBlock.openNewTab = function(details) {
    if ( details.url.startsWith('logger-ui.html') ) {
        if ( details.shiftKey ) {
            this.changeUserSettings(
                'alwaysDetachLogger',
                !this.userSettings.alwaysDetachLogger
            );
        }
        details.popup = this.userSettings.alwaysDetachLogger;
        if ( details.popup ) {
            const loggerURL = new URL(vAPI.getURL(details.url));
            loggerURL.searchParams.set('popup', '1');
            details.url = loggerURL.href;
            // Best effort: restore the last saved popup geometry; a
            // missing or malformed saved value is simply ignored.
            let box;
            try {
                box = JSON.parse(
                    vAPI.localStorage.getItem('popupLoggerBox')
                );
            } catch(ex) {
            }
            if ( box !== undefined ) {
                details.box = box;
            }
        }
    }
    vAPI.tabs.open(details);
};
|
|
|
|
|
|
|
|
/******************************************************************************/
|
2017-01-27 19:44:52 +01:00
|
|
|
|
2017-10-21 19:43:46 +02:00
|
|
|
// Most-recently-used cache holding at most `size` entries; the least
// recently looked-up entry is evicted first.
µBlock.MRUCache = function(size) {
    this.size = size;               // maximum number of entries
    this.array = [];                // keys, most recently used first
    this.map = new Map();           // key -> value
    this.resetTime = Date.now();    // time of last reset()
};
|
|
|
|
|
|
|
|
µBlock.MRUCache.prototype = {
    // Insert or update an entry. A new key goes to the front of the
    // MRU list; when the cache is full, the least recently used entry
    // is evicted first.
    add: function(key, value) {
        var found = this.map.has(key);
        this.map.set(key, value);
        if ( !found ) {
            if ( this.array.length === this.size ) {
                this.map.delete(this.array.pop());
            }
            this.array.unshift(key);
        }
    },
    // Remove an entry if present.
    remove: function(key) {
        if ( this.map.has(key) ) {
            this.array.splice(this.array.indexOf(key), 1);
            // Fix: the entry must also be removed from the map --
            // without this, a later lookup() returns the stale value
            // and then runs its move-to-front loop with indexOf()
            // returning -1, i.e. on out-of-bounds negative indices.
            this.map.delete(key);
        }
    },
    // Return the value for `key`, or undefined. A hit moves the key
    // to the front of the MRU list.
    lookup: function(key) {
        var value = this.map.get(key);
        if ( value !== undefined && this.array[0] !== key ) {
            // Shift entries right, then place key at the front.
            var i = this.array.indexOf(key);
            do {
                this.array[i] = this.array[i-1];
            } while ( --i );
            this.array[0] = key;
        }
        return value;
    },
    // Empty the cache and record the time at which this occurred.
    reset: function() {
        this.array = [];
        this.map.clear();
        this.resetTime = Date.now();
    }
};
|
|
|
|
|
|
|
|
/******************************************************************************/
|
2017-11-09 18:53:05 +01:00
|
|
|
|
|
|
|
// https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions
|
|
|
|
|
|
|
|
// Escape every regex metacharacter in `s` so it can be embedded
// literally inside a regular expression.
µBlock.escapeRegex = function(s) {
    const escaped = s.replace(/[.*+?^${}()|[\]\\]/g, '\\$&');
    return escaped;
};
|
|
|
|
|
|
|
|
/******************************************************************************/
|
2018-06-03 19:27:42 +02:00
|
|
|
|
|
|
|
// Decompose a hostname into itself plus all its broader forms, ending
// with '*'. Example: 'a.b.c' -> ['a.b.c', 'b.c', 'c', '*']. IPv4
// addresses are broadened by dropping trailing components; IPv6
// addresses go straight to '*'. `decomposed` is reused as an output
// buffer: it is recomputed only when its first entry differs from
// `hostname`.
µBlock.decomposeHostname = (function() {
    // For performance purpose, as simple tests as possible
    const reHostnameVeryCoarse = /[g-z_-]/;
    const reIPv4VeryCoarse = /\.\d+$/;

    // Drop the leftmost label; a single label broadens to '*'.
    const toBroaderHostname = function(hostname) {
        const dot = hostname.indexOf('.');
        if ( dot !== -1 ) { return hostname.slice(dot + 1); }
        return hostname !== '*' && hostname !== '' ? '*' : '';
    };

    // Drop the rightmost dot-separated component of an IPv4 address.
    const toBroaderIPv4Address = function(ipaddress) {
        if ( ipaddress === '*' || ipaddress === '' ) { return ''; }
        const dot = ipaddress.lastIndexOf('.');
        if ( dot === -1 ) { return '*'; }
        return ipaddress.slice(0, dot);
    };

    // IPv6 addresses have no meaningful intermediate forms.
    const toBroaderIPv6Address = function(ipaddress) {
        return ipaddress !== '*' && ipaddress !== '' ? '*' : '';
    };

    return function decomposeHostname(hostname, decomposed) {
        if ( decomposed.length === 0 || decomposed[0] !== hostname ) {
            let broaden = toBroaderHostname;
            if ( reHostnameVeryCoarse.test(hostname) === false ) {
                if ( reIPv4VeryCoarse.test(hostname) ) {
                    broaden = toBroaderIPv4Address;
                } else if ( hostname.startsWith('[') ) {
                    broaden = toBroaderIPv6Address;
                }
            }
            decomposed[0] = hostname;
            let i = 1;
            for (;;) {
                hostname = broaden(hostname);
                if ( hostname === '' ) { break; }
                decomposed[i++] = hostname;
            }
            decomposed.length = i;
        }
        return decomposed;
    };
})();
|
2018-10-23 19:01:08 +02:00
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
|
|
|
// TODO: evaluate using TextEncoder/TextDecoder
|
|
|
|
|
|
|
|
// Return a fresh, self-contained copy of string `s`.
//
// NOTE(review): presumably the point is memory, not value: a string
// obtained through slicing may internally keep a reference to its
// (possibly much larger) parent string in some JS engines, and the
// JSON round-trip forces a brand new flat string so the parent can be
// garbage-collected. The round-trip is the intended mechanism -- do
// not "simplify" it away.
µBlock.orphanizeString = function(s) {
    return JSON.parse(JSON.stringify(s));
};
|
2019-02-14 19:33:55 +01:00
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
2019-04-20 15:06:54 +02:00
|
|
|
// Custom base64 encoder/decoder
|
2019-02-14 19:33:55 +01:00
|
|
|
//
|
|
|
|
// TODO:
|
|
|
|
// Could expand the LZ4 codec API to be able to return UTF8-safe string
|
|
|
|
// representation of a compressed buffer, and thus the code below could be
|
|
|
|
// moved LZ4 codec-side.
|
2019-03-16 14:00:31 +01:00
|
|
|
// https://github.com/uBlockOrigin/uBlock-issues/issues/461
|
|
|
|
// Provide a fallback encoding for Chromium 59 and less by issuing a plain
|
|
|
|
// JSON string. The fallback can be removed once min supported version is
|
|
|
|
// above 59.
|
2019-02-14 19:33:55 +01:00
|
|
|
|
2019-04-20 15:06:54 +02:00
|
|
|
// Encoder/decoder turning Uint32Array content into a string of
// space-terminated little-endian base-64 fields, prefixed by a magic
// header and a length field. On platforms lacking TextDecoder, a
// plain JSON array string is produced/consumed instead.
µBlock.base64 = new (class {
    constructor() {
        // Digit <-> 6-bit value lookup tables.
        this.valToDigit = new Uint8Array(64);
        this.digitToVal = new Uint8Array(128);
        const chars = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz@%";
        for ( let i = 0, n = chars.length; i < n; i++ ) {
            const c = chars.charCodeAt(i);
            this.valToDigit[i] = c;
            this.digitToVal[c] = i;
        }
        this.magic = 'Base64_1';    // header identifying this encoding
    }

    // Encode the first `arrlen` bytes of ArrayBuffer `arrbuf`:
    // magic header, space-terminated uint32 count, then one
    // space-terminated field per uint32.
    encode(arrbuf, arrlen) {
        const inputLength = arrlen >>> 2;
        const inbuf = new Uint32Array(arrbuf, 0, inputLength);
        // Worst case: 6 digits + 1 separator per uint32, plus header
        // and length field.
        const outputLength = this.magic.length + 7 + inputLength * 7;
        const outbuf = new Uint8Array(outputLength);
        let j = 0;
        for ( let i = 0; i < this.magic.length; i++ ) {
            outbuf[j++] = this.magic.charCodeAt(i);
        }
        let v = inputLength;
        do {
            outbuf[j++] = this.valToDigit[v & 0b111111];
            v >>>= 6;
        } while ( v !== 0 );
        outbuf[j++] = 0x20 /* ' ' */;
        for ( let i = 0; i < inputLength; i++ ) {
            v = inbuf[i];
            do {
                outbuf[j++] = this.valToDigit[v & 0b111111];
                v >>>= 6;
            } while ( v !== 0 );
            outbuf[j++] = 0x20 /* ' ' */;
        }
        // Fallback for Chromium <= 59: no TextDecoder, emit JSON.
        if ( typeof TextDecoder === 'undefined' ) {
            return JSON.stringify(
                Array.from(new Uint32Array(outbuf.buffer, 0, j >>> 2))
            );
        }
        const textDecoder = new TextDecoder();
        return textDecoder.decode(new Uint8Array(outbuf.buffer, 0, j));
    }

    // Decode a string produced by encode() into a Uint32Array. When
    // `arrbuf` is provided it is used as backing buffer, otherwise a
    // buffer of exactly the decoded size is allocated.
    // Throws on input lacking the expected header or length field.
    decode(instr, arrbuf) {
        // JSON fallback produced by encode() on older Chromium.
        if ( instr.charCodeAt(0) === 0x5B /* '[' */ ) {
            const inbuf = JSON.parse(instr);
            if ( arrbuf instanceof ArrayBuffer === false ) {
                return new Uint32Array(inbuf);
            }
            const outbuf = new Uint32Array(arrbuf);
            outbuf.set(inbuf);
            return outbuf;
        }
        if ( instr.startsWith(this.magic) === false ) {
            throw new Error('Invalid µBlock.base64 encoding');
        }
        const inputLength = instr.length;
        const outbuf = arrbuf instanceof ArrayBuffer === false
            ? new Uint32Array(this.decodeSize(instr))
            : new Uint32Array(arrbuf);
        // Fix: test indexOf()'s result *before* adding 1. The original
        // computed `indexOf(...) + 1` first, so the `=== -1` "not
        // found" branch was unreachable (-1 + 1 === 0) and malformed
        // input was silently decoded from position 0.
        let i = instr.indexOf(' ', this.magic.length);
        if ( i === -1 ) {
            throw new Error('Invalid µBlock.base64 encoding');
        }
        i += 1;
        let j = 0;
        for (;;) {
            if ( i === inputLength ) { break; }
            // Accumulate one little-endian base-64 field.
            let v = 0, l = 0;
            for (;;) {
                if ( i === inputLength ) { break; }
                const c = instr.charCodeAt(i++);
                if ( c === 0x20 /* ' ' */ ) { break; }
                v += this.digitToVal[c] << l;
                l += 6;
            }
            outbuf[j++] = v;
        }
        return outbuf;
    }

    // Number of bytes needed to decode `instr`, derived from its
    // length field. Returns 0 for input lacking the magic header or a
    // terminating space after the length field.
    decodeSize(instr) {
        if ( instr.startsWith(this.magic) === false ) { return 0; }
        const n = instr.length;
        let v = 0, l = 0, i = this.magic.length;
        for (;;) {
            // Guard against malformed input with no terminating space.
            if ( i === n ) { return 0; }
            const c = instr.charCodeAt(i++);
            if ( c === 0x20 /* ' ' */ ) { break; }
            v += this.digitToVal[c] << l;
            l += 6;
        }
        return v << 2;
    }
})();
|
2019-02-19 16:46:33 +01:00
|
|
|
|
|
|
|
/******************************************************************************/
|
|
|
|
|
|
|
|
// The requests.json.gz file can be downloaded from:
|
|
|
|
// https://cdn.cliqz.com/adblocking/requests_top500.json.gz
|
|
|
|
//
|
|
|
|
// Which is linked from:
|
|
|
|
// https://whotracks.me/blog/adblockers_performance_study.html
|
|
|
|
//
|
|
|
|
// Copy the file into ./tmp/requests.json.gz
|
|
|
|
//
|
|
|
|
// If the file is present when you build uBO using `make-[target].sh` from
|
|
|
|
// the shell, the resulting package will have `./assets/requests.json`, which
|
|
|
|
// will be looked-up by the method below to launch a benchmark session.
|
|
|
|
//
|
|
|
|
// From uBO's dev console, launch the benchmark:
|
|
|
|
// µBlock.staticNetFilteringEngine.benchmark();
|
|
|
|
//
|
|
|
|
// The advanced setting `consoleLogLevel` must be set to `info` to see the
|
|
|
|
// results in uBO's dev console, see:
|
|
|
|
// https://github.com/gorhill/uBlock/wiki/Advanced-settings#consoleloglevel
|
|
|
|
//
|
|
|
|
// The usual browser dev tools can be used to obtain useful profiling
|
|
|
|
// data, i.e. start the profiler, call the benchmark method from the
|
|
|
|
// console, then stop the profiler when it completes.
|
|
|
|
//
|
|
|
|
// Keep in mind that the measurements at the blog post above where obtained
|
|
|
|
// with ONLY EasyList. The CPU reportedly used was:
|
|
|
|
// https://www.cpubenchmark.net/cpu.php?cpu=Intel+Core+i7-6600U+%40+2.60GHz&id=2608
|
|
|
|
//
|
|
|
|
// Rename ./tmp/requests.json.gz to something else if you no longer want
|
|
|
|
// ./assets/requests.json in the build.
|
|
|
|
|
|
|
|
// Load (and cache) the benchmark dataset from /assets/requests.json.
// Returns a promise resolving to an array of request descriptors, or
// to undefined when the asset is absent. The cached dataset is
// released after one minute without a call to this function.
µBlock.loadBenchmarkDataset = (function() {
    let datasetPromise;
    let ttlTimer;

    return function() {
        if ( ttlTimer !== undefined ) {
            clearTimeout(ttlTimer);
            ttlTimer = undefined;
        }
        // Fix: keep the timer id returned by setTimeout(). The
        // original discarded it, so the clearTimeout() above could
        // never cancel anything -- stale timers piled up and an old
        // one could wipe a dataset still in use.
        ttlTimer = vAPI.setTimeout(( ) => {
            ttlTimer = undefined;
            datasetPromise = undefined;
        }, 60000);

        if ( datasetPromise !== undefined ) {
            return datasetPromise;
        }

        console.info(`Loading benchmark dataset...`);
        const url = vAPI.getURL('/assets/requests.json');
        datasetPromise = µBlock.assets.fetchText(url).then(details => {
            console.info(`Parsing benchmark dataset...`);
            const requests = [];
            const lineIter = new µBlock.LineIterator(details.content);
            while ( lineIter.eot() === false ) {
                let request;
                try {
                    request = JSON.parse(lineIter.next());
                } catch(ex) {
                    // Skip unparsable lines.
                }
                if ( request instanceof Object === false ) { continue; }
                if ( !request.frameUrl || !request.url ) { continue; }
                requests.push(request);
            }
            return requests;
        }).catch(details => {
            console.info(`Not found: ${details.url}`);
            datasetPromise = undefined;
        });

        return datasetPromise;
    };
})();
|