
Commit 045d430

fix: lint and format
1 parent da92015

File tree

3 files changed: +52 -8 lines


packages/rum-core/src/domain/action/privacy/allowedDictionary.spec.ts

Lines changed: 49 additions & 4 deletions
@@ -54,19 +54,64 @@ if (isBrowserSupported()) {
     * Asian languages are not supported by our current tokenizer strategy.
     */
    it('Tokenized results matches words and symbols in TEST_STRINGS', () => {
-      const expectedParagraphMixed = ['This', "isn't", 'a', 'sentence', "it's", "RUM's", 'test', '💥', '=', '+', '+', 'and', 'more']
+      const expectedParagraphMixed = [
+        'This',
+        "isn't",
+        'a',
+        'sentence',
+        "it's",
+        "RUM's",
+        'test',
+        '💥',
+        '=',
+        '+',
+        '+',
+        'and',
+        'more',
+      ]
       expect(tokenize(TEST_STRINGS.PARAGRAPH_MIXED).sort()).toEqual(expectedParagraphMixed.sort())

-      const expectedFrench = ["C'est", 'pas', 'un', 'test', "c'est", "RUM's", 'test', '💥', '=', '+', '+', 'et', 'plus']
+      const expectedFrench = ["C'est", 'pas', 'un', 'test', "c'est", "RUM's", 'test', '💥', '=', '+', '+', 'et', 'plus']
       expect(tokenize(LANGUAGES_TEST_STRINGS.FRENCH_MIXED_SENTENCE).sort()).toEqual(expectedFrench.sort())

       const expectedSpanish = ['Este', 'no', 'es', 'un', 'test', 'es', "RUM's", 'test', '💥', '=', '+', '+', 'y', 'más']
       expect(tokenize(LANGUAGES_TEST_STRINGS.SPANISH_MIXED_SENTENCE).sort()).toEqual(expectedSpanish.sort())

-      const expectedGerman = ['Das', 'ist', 'kein', 'Test', 'das', 'ist', "RUM's", 'Test', '💥', '=', '+', '+', 'und', 'mehr']
+      const expectedGerman = [
+        'Das',
+        'ist',
+        'kein',
+        'Test',
+        'das',
+        'ist',
+        "RUM's",
+        'Test',
+        '💥',
+        '=',
+        '+',
+        '+',
+        'und',
+        'mehr',
+      ]
       expect(tokenize(LANGUAGES_TEST_STRINGS.GERMAN_MIXED_SENTENCE).sort()).toEqual(expectedGerman.sort())

-      const expectedPortuguese = ['Este', 'não', 'é', 'um', 'teste', 'este', 'é', "RUM's", 'test', '💥', '=', '+', '+', 'e', 'mais']
+      const expectedPortuguese = [
+        'Este',
+        'não',
+        'é',
+        'um',
+        'teste',
+        'este',
+        'é',
+        "RUM's",
+        'test',
+        '💥',
+        '=',
+        '+',
+        '+',
+        'e',
+        'mais',
+      ]
       expect(tokenize(LANGUAGES_TEST_STRINGS.PORTUGUESE_MIXED_SENTENCE).sort()).toEqual(expectedPortuguese.sort())
     })
   })
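A note on the assertion style above: each check sorts both the tokenizer output and the expected array before comparing, so the test verifies the set of tokens produced rather than their order. A standalone sketch of that pattern, outside the spec (the helper name and harness here are illustrative, not part of rum-core):

// Illustrative helper only; expectSameTokens is not a rum-core API.
// Sorting both sides makes the comparison order-insensitive while still
// failing on missing, extra, or altered tokens.
function expectSameTokens(actual: string[], expected: string[]): void {
  const sortedActual = [...actual].sort()
  const sortedExpected = [...expected].sort()
  const matches =
    sortedActual.length === sortedExpected.length &&
    sortedActual.every((token, i) => token === sortedExpected[i])
  if (!matches) {
    throw new Error(`expected [${sortedExpected.join(', ')}] but got [${sortedActual.join(', ')}]`)
  }
}

// Usage mirroring the first assertion in the spec:
//   expectSameTokens(tokenize(TEST_STRINGS.PARAGRAPH_MIXED), expectedParagraphMixed)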

packages/rum-core/src/domain/action/privacy/allowedDictionary.ts

Lines changed: 3 additions & 3 deletions
@@ -24,12 +24,12 @@ function getOrInitRegexes(): UnicodeRegexes | undefined {

   try {
     cachedRegexes = {
-      // Split on punctuation, separators, and control characters
-      splitRegex: new RegExp(`[^\\p{Separator}\\p{Cc}\\p{Sm}!"(),-./:;?[\\]\`_{|}]+`, 'gu'),
+      // Split on separators, control characters, and selected punctuation
+      splitRegex: new RegExp('[^\\p{Separator}\\p{Cc}\\p{Sm}!"(),-./:;?[\\]`_{|}]+', 'gu'),
       // Match letters (including apostrophes), emojis, and mathematical symbols
       matchRegex: new RegExp("[\\p{Letter}’']+|[\\p{Emoji_Presentation}]+|[\\p{Sm}]+", 'gu'),
     }
-  } catch {
+  } catch {
     cachedRegexes = undefined
   }
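For context on the hunk above: the regexes are built inside a try/catch because Unicode property escapes (\p{...} with the 'u' flag) make RegExp construction throw on engines that do not support them, in which case cachedRegexes stays undefined. The standalone sketch below shows what each pattern matches on a sample string; the two patterns are copied from the commit, while the sample text and logging harness are illustrative only:

// Standalone sketch (not library code): the two patterns below are the ones
// cached by getOrInitRegexes in the hunk above.
const splitRegex = new RegExp('[^\\p{Separator}\\p{Cc}\\p{Sm}!"(),-./:;?[\\]`_{|}]+', 'gu')
const matchRegex = new RegExp("[\\p{Letter}’']+|[\\p{Emoji_Presentation}]+|[\\p{Sm}]+", 'gu')

const sample = "This isn't RUM's test 💥 = + +"

// Runs of characters that are not separators, control characters,
// math symbols, or the listed punctuation:
console.log(sample.match(splitRegex))
// -> [ 'This', "isn't", "RUM's", 'test', '💥' ]

// Letter runs (apostrophes included), emoji runs, and math-symbol runs:
console.log(sample.match(matchRegex))
// -> [ 'This', "isn't", "RUM's", 'test', '💥', '=', '+', '+' ]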

packages/rum-core/src/domain/privacy.ts

Lines changed: 0 additions & 1 deletion
@@ -36,7 +36,6 @@ export const FORM_PRIVATE_TAG_NAMES: { [tagName: string]: true } = {
 }

 export const TEXT_MASKING_CHAR = 'x'
-export const TEXT_MASKING_STR= 'xxx'

 export type NodePrivacyLevelCache = Map<Node, NodePrivacyLevel>
