Mirror of https://github.com/excalidraw/excalidraw.git (synced 2025-04-14 16:40:58 -04:00)

feat: add first-class support for CJK (#8530)

parent 21815fb930, commit b479f3bd65
288 changed files with 3559 additions and 918 deletions
@@ -31,7 +31,7 @@ The welcome screen consists of two main groups of subcomponents:
  <img
    src={require("@site/static/img/welcome-screen-overview.png").default}
    alt="Excalidraw logo: Sketch handrawn like diagrams."
    alt="Excalidraw logo: Sketch hand-drawn like diagrams."
  />

### Center
@@ -12,7 +12,7 @@ import { FONT_FAMILY } from "@excalidraw/excalidraw";

| Font Family | Description            |
| ----------- | ---------------------- |
| `Virgil`    | The `handwritten` font |
| `Virgil`    | The `Hand-drawn` font  |
| `Helvetica` | The `Normal` Font      |
| `Cascadia`  | The `Code` Font        |
@@ -133,7 +133,7 @@
    <!-- Register Assistant as the UI font, before the scene inits -->
    <link
      rel="stylesheet"
      href="../packages/excalidraw/fonts/assets/fonts.css"
      href="../packages/excalidraw/fonts/css/fonts.css"
      type="text/css"
    />
@@ -25,8 +25,14 @@ export default defineConfig({
    output: {
      assetFileNames(chunkInfo) {
        if (chunkInfo?.name?.endsWith(".woff2")) {
          // TODO: consider splitting all fonts similar to Xiaolai
          // fonts don't change often, so hash is not necessary
          // put on root so we are flexible about the CDN path
          return "[name]-[hash][extname]";
          if (chunkInfo.name.includes("Xiaolai")) {
            return "[name][extname]";
          } else {
            return "[name]-[hash][extname]";
          }
        }

        return "assets/[name]-[hash][extname]";
@@ -75,17 +81,21 @@ export default defineConfig({
      },

      workbox: {
        // Don't push fonts, locales and wasm to app precache
        globIgnores: ["fonts.css", "**/locales/**", "service-worker.js", "**/*.wasm-*.js"],
        // don't precache fonts, locales and separate chunks
        globIgnores: ["fonts.css", "**/locales/**", "service-worker.js", "**/*.chunk-*.js"],
        runtimeCaching: [
          {
            urlPattern: new RegExp("/.+.(ttf|woff2|otf)"),
            handler: "CacheFirst",
            urlPattern: new RegExp(".+.woff2"),
            handler: 'CacheFirst',
            options: {
              cacheName: "fonts",
              cacheName: 'fonts',
              expiration: {
                maxEntries: 50,
                maxAgeSeconds: 60 * 60 * 24 * 90, // <== 90 days
                maxEntries: 1000,
                maxAgeSeconds: 60 * 60 * 24 * 90, // 90 days
              },
              cacheableResponse: {
                // 0 to cache "opaque" responses from cross-origin requests (i.e. CDN)
                statuses: [0, 200],
              },
            },
          },
@@ -111,10 +121,10 @@ export default defineConfig({
            },
          },
          {
            urlPattern: new RegExp(".wasm-.+.js"),
            urlPattern: new RegExp(".chunk-.+.js"),
            handler: "CacheFirst",
            options: {
              cacheName: "wasm",
              cacheName: "chunk",
              expiration: {
                maxEntries: 50,
                maxAgeSeconds: 60 * 60 * 24 * 90, // <== 90 days
@@ -15,6 +15,8 @@ Please add the latest change on the top under the correct section.

### Features

- Added a hand-drawn font for Chinese, Japanese and Korean (CJK) as a fallback for Excalifont. Improved the overall text-wrapping algorithm, not only accounting for CJK, but also covering various edge cases with white spaces and text-align center/right. Added support for wrapping multi-codepoint emojis. Offloaded SVG export to Web Workers, with an automatic fallback to the main thread if not supported or not desired. [#8530](https://github.com/excalidraw/excalidraw/pull/8530)

- Prefer user defined coordinates and dimensions when creating a frame using [`convertToExcalidrawElements`](https://docs.excalidraw.com/docs/@excalidraw/excalidraw/api/excalidraw-element-skeleton#converttoexcalidrawelements) [#8517](https://github.com/excalidraw/excalidraw/pull/8517)

- `props.initialData` can now be a function that returns `ExcalidrawInitialDataState` or `Promise<ExcalidrawInitialDataState>`. [#8107](https://github.com/excalidraw/excalidraw/pull/8135)
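To make the new `initialData` contract above concrete, here is a minimal usage sketch. It is not part of this diff; the `/scene.json` endpoint and the component wiring are illustrative assumptions only.

```tsx
// Hypothetical host-app usage of the lazy `initialData` described in the changelog entry above.
import { Excalidraw } from "@excalidraw/excalidraw";

// Can return ExcalidrawInitialDataState directly, or a Promise resolving to it.
const loadInitialData = async () => {
  const response = await fetch("/scene.json"); // made-up endpoint
  return response.json();
};

export const App = () => <Excalidraw initialData={loadInitialData} />;
```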
@@ -147,14 +147,32 @@ export const actionCopyAsSvg = register({
          name: app.getName(),
        },
      );

      const selectedElements = app.scene.getSelectedElements({
        selectedElementIds: appState.selectedElementIds,
        includeBoundTextElement: true,
        includeElementsInFrames: true,
      });

      return {
        appState: {
          toast: {
            message: t("toast.copyToClipboardAsSvg", {
              exportSelection: selectedElements.length
                ? t("toast.selection")
                : t("toast.canvas"),
              exportColorScheme: appState.exportWithDarkMode
                ? t("buttons.darkMode")
                : t("buttons.lightMode"),
            }),
          },
        },
        storeAction: StoreAction.NONE,
      };
    } catch (error: any) {
      console.error(error);
      return {
        appState: {
          ...appState,
          errorMessage: error.message,
        },
        storeAction: StoreAction.NONE,
@@ -164,6 +182,7 @@ export const actionCopyAsSvg = register({
  predicate: (elements) => {
    return probablySupportsClipboardWriteText && elements.length > 0;
  },
  keyTest: (event) => event.code === CODES.C && event.ctrlKey && event.shiftKey,
  keywords: ["svg", "clipboard", "copy"],
});
@@ -88,7 +88,7 @@ const shortcutMap: Record<ShortcutName, string[]> = {
      : getShortcutKey("CtrlOrCmd+Shift+]"),
  ],
  copyAsPng: [getShortcutKey("Shift+Alt+C")],
  copyAsSvg: [],
  copyAsSvg: [getShortcutKey("Shift+Ctrl+C")],
  group: [getShortcutKey("CtrlOrCmd+G")],
  ungroup: [getShortcutKey("CtrlOrCmd+Shift+G")],
  gridMode: [getShortcutKey("CtrlOrCmd+'")],
@@ -10,7 +10,6 @@ import type {
  BinaryFiles,
  UIAppState,
} from "../types";
import type { MarkOptional } from "../utility-types";
import type { StoreActionType } from "../store";

export type ActionSource =

@@ -24,10 +23,7 @@ export type ActionSource =
export type ActionResult =
  | {
      elements?: readonly ExcalidrawElement[] | null;
      appState?: MarkOptional<
        AppState,
        "offsetTop" | "offsetLeft" | "width" | "height"
      > | null;
      appState?: Partial<AppState> | null;
      files?: BinaryFiles | null;
      storeAction: StoreActionType;
      replaceFiles?: boolean;
@@ -2150,11 +2150,12 @@ class App extends React.Component<AppProps, AppState> {
        editingTextElement = null;
      }

      this.setState((state) => {
        // using Object.assign instead of spread to fool TS 4.2.2+ into
        // regarding the resulting type as not containing undefined
        // (which the following expression will never contain)
        return Object.assign(actionResult.appState || {}, {
      this.setState((prevAppState) => {
        const actionAppState = actionResult.appState || {};

        return {
          ...prevAppState,
          ...actionAppState,
          // NOTE this will prevent opening context menu using an action
          // or programmatically from the host, so it will need to be
          // rewritten later

@@ -2165,7 +2166,7 @@ class App extends React.Component<AppProps, AppState> {
          theme,
          name,
          errorMessage,
        });
      };
      });

      didUpdate = true;
@@ -21,7 +21,7 @@ export const DEFAULT_FONTS = [
    value: FONT_FAMILY.Excalifont,
    icon: FreedrawIcon,
    text: t("labels.handDrawn"),
    testId: "font-family-handrawn",
    testId: "font-family-hand-drawn",
  },
  {
    value: FONT_FAMILY.Nunito,
@@ -21,6 +21,7 @@ import { t } from "../../i18n";
import { fontPickerKeyHandler } from "./keyboardNavHandlers";
import { Fonts } from "../../fonts";
import type { ValueOf } from "../../utility-types";
import { FontFamilyNormalIcon } from "../icons";

export interface FontDescriptor {
  value: number;

@@ -62,12 +63,14 @@ export const FontPickerList = React.memo(
    const allFonts = useMemo(
      () =>
        Array.from(Fonts.registered.entries())
          .filter(([_, { metadata }]) => !metadata.serverSide)
          .map(([familyId, { metadata, fonts }]) => {
          .filter(
            ([_, { metadata }]) => !metadata.serverSide && !metadata.fallback,
          )
          .map(([familyId, { metadata, fontFaces }]) => {
            const fontDescriptor = {
              value: familyId,
              icon: metadata.icon,
              text: fonts[0].fontFace.family,
              icon: metadata.icon ?? FontFamilyNormalIcon,
              text: fontFaces[0]?.fontFace?.family ?? "Unknown",
            };

            if (metadata.deprecated) {

@@ -89,7 +92,7 @@ export const FontPickerList = React.memo(
    );

    const sceneFamilies = useMemo(
      () => new Set(fonts.getSceneFontFamilies()),
      () => new Set(fonts.getSceneFamilies()),
      // cache per selected font family, so hover re-render won't mess it up
      // eslint-disable-next-line react-hooks/exhaustive-deps
      [selectedFontFamily],
@@ -374,6 +374,10 @@ export const HelpDialog = ({ onClose }: { onClose?: () => void }) => {
              shortcuts={[getShortcutKey("Shift+Alt+C")]}
            />
          )}
          <Shortcut
            label={t("labels.copyAsSvg")}
            shortcuts={[getShortcutKey("Shift+Ctrl+C")]}
          />
          <Shortcut
            label={t("labels.copyStyles")}
            shortcuts={[getShortcutKey("CtrlOrCmd+Alt+C")]}
@@ -48,6 +48,9 @@ const ChartPreviewBtn = (props: {
        viewBackgroundColor: oc.white,
      },
      null, // files
      {
        skipInliningFonts: true,
      },
    );
    svg.querySelector(".style-fonts")?.remove();
    previewNode.replaceChildren();
@@ -116,6 +116,9 @@ export const CLASSES = {
  SEARCH_MENU_INPUT_WRAPPER: "layer-ui__search-inputWrapper",
};

export const CJK_HAND_DRAWN_FALLBACK_FONT = "Xiaolai";
export const WINDOWS_EMOJI_FALLBACK_FONT = "Segoe UI Emoji";

/**
 * // TODO: shouldn't really be `const`, and likely shouldn't have integers as values either, due to the value for custom fonts, which should likely be some hash.
 *

@@ -136,6 +139,22 @@ export const FONT_FAMILY = {
  "Liberation Sans": 9,
};

export const FONT_FAMILY_FALLBACKS = {
  [CJK_HAND_DRAWN_FALLBACK_FONT]: 100,
  [WINDOWS_EMOJI_FALLBACK_FONT]: 1000,
};

export const getFontFamilyFallbacks = (
  fontFamily: number,
): Array<keyof typeof FONT_FAMILY_FALLBACKS> => {
  switch (fontFamily) {
    case FONT_FAMILY.Excalifont:
      return [CJK_HAND_DRAWN_FALLBACK_FONT, WINDOWS_EMOJI_FALLBACK_FONT];
    default:
      return [WINDOWS_EMOJI_FALLBACK_FONT];
  }
};
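A quick illustration of how the helper above resolves (not part of the diff; the font-family stack at the end is only a sketch of how a consumer might use the result):

```ts
// For the hand-drawn family, CJK glyphs fall back to Xiaolai and emojis to Segoe UI Emoji.
const fallbacks = getFontFamilyFallbacks(FONT_FAMILY.Excalifont);
// => ["Xiaolai", "Segoe UI Emoji"]

// Any other family only gets the emoji fallback.
getFontFamilyFallbacks(FONT_FAMILY.Nunito); // => ["Segoe UI Emoji"]

// A hypothetical font-family stack built from it:
const stack = ["Excalifont", ...fallbacks].join(", ");
// => "Excalifont, Xiaolai, Segoe UI Emoji"
```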

export const THEME = {
  LIGHT: "light",
  DARK: "dark",
@@ -157,8 +176,6 @@ export const FRAME_STYLE = {
  nameLineHeight: 1.25,
};

export const WINDOWS_EMOJI_FALLBACK_FONT = "Segoe UI Emoji";

export const MIN_FONT_SIZE = 1;
export const DEFAULT_FONT_SIZE = 20;
export const DEFAULT_FONT_FAMILY: FontFamilyValues = FONT_FAMILY.Excalifont;
@ -2171,7 +2171,7 @@ exports[`Test Transform > should transform to labelled arrows when label provide
|
|||
"strokeColor": "#1098ad",
|
||||
"strokeStyle": "solid",
|
||||
"strokeWidth": 2,
|
||||
"text": "ANOTHER STYLED
|
||||
"text": "ANOTHER STYLED
|
||||
LABELLED ARROW",
|
||||
"textAlign": "center",
|
||||
"type": "text",
|
||||
|
@ -2179,8 +2179,8 @@ LABELLED ARROW",
|
|||
"version": 3,
|
||||
"versionNonce": Any<Number>,
|
||||
"verticalAlign": "middle",
|
||||
"width": 150,
|
||||
"x": 75,
|
||||
"width": 140,
|
||||
"x": 80,
|
||||
"y": 275,
|
||||
}
|
||||
`;
|
||||
|
@ -2213,7 +2213,7 @@ exports[`Test Transform > should transform to labelled arrows when label provide
|
|||
"strokeColor": "#099268",
|
||||
"strokeStyle": "solid",
|
||||
"strokeWidth": 2,
|
||||
"text": "ANOTHER STYLED
|
||||
"text": "ANOTHER STYLED
|
||||
LABELLED ARROW",
|
||||
"textAlign": "center",
|
||||
"type": "text",
|
||||
|
@ -2221,8 +2221,8 @@ LABELLED ARROW",
|
|||
"version": 3,
|
||||
"versionNonce": Any<Number>,
|
||||
"verticalAlign": "middle",
|
||||
"width": 150,
|
||||
"x": 75,
|
||||
"width": 140,
|
||||
"x": 80,
|
||||
"y": 375,
|
||||
}
|
||||
`;
|
||||
|
@ -2518,7 +2518,7 @@ exports[`Test Transform > should transform to text containers when label provide
|
|||
"strokeColor": "#1e1e1e",
|
||||
"strokeStyle": "solid",
|
||||
"strokeWidth": 2,
|
||||
"text": "ELLIPSE TEXT
|
||||
"text": "ELLIPSE TEXT
|
||||
CONTAINER",
|
||||
"textAlign": "center",
|
||||
"type": "text",
|
||||
|
@ -2526,8 +2526,8 @@ CONTAINER",
|
|||
"version": 3,
|
||||
"versionNonce": Any<Number>,
|
||||
"verticalAlign": "middle",
|
||||
"width": 130,
|
||||
"x": 534.7893218813452,
|
||||
"width": 120,
|
||||
"x": 539.7893218813452,
|
||||
"y": 117.44796179957173,
|
||||
}
|
||||
`;
|
||||
|
@ -2562,7 +2562,7 @@ TEXT CONTAINER",
|
|||
"strokeStyle": "solid",
|
||||
"strokeWidth": 2,
|
||||
"text": "DIAMOND
|
||||
TEXT
|
||||
TEXT
|
||||
CONTAINER",
|
||||
"textAlign": "center",
|
||||
"type": "text",
|
||||
|
@ -2646,8 +2646,8 @@ exports[`Test Transform > should transform to text containers when label provide
|
|||
"strokeColor": "#c2255c",
|
||||
"strokeStyle": "solid",
|
||||
"strokeWidth": 2,
|
||||
"text": "TOP LEFT ALIGNED
|
||||
RECTANGLE TEXT
|
||||
"text": "TOP LEFT ALIGNED
|
||||
RECTANGLE TEXT
|
||||
CONTAINER",
|
||||
"textAlign": "left",
|
||||
"type": "text",
|
||||
|
@ -2655,7 +2655,7 @@ CONTAINER",
|
|||
"version": 3,
|
||||
"versionNonce": Any<Number>,
|
||||
"verticalAlign": "top",
|
||||
"width": 170,
|
||||
"width": 160,
|
||||
"x": 505,
|
||||
"y": 305,
|
||||
}
|
||||
|
@ -2689,8 +2689,8 @@ exports[`Test Transform > should transform to text containers when label provide
|
|||
"strokeColor": "#c2255c",
|
||||
"strokeStyle": "solid",
|
||||
"strokeWidth": 2,
|
||||
"text": "STYLED
|
||||
ELLIPSE TEXT
|
||||
"text": "STYLED
|
||||
ELLIPSE TEXT
|
||||
CONTAINER",
|
||||
"textAlign": "center",
|
||||
"type": "text",
|
||||
|
@ -2698,8 +2698,8 @@ CONTAINER",
|
|||
"version": 3,
|
||||
"versionNonce": Any<Number>,
|
||||
"verticalAlign": "middle",
|
||||
"width": 130,
|
||||
"x": 534.7893218813452,
|
||||
"width": 120,
|
||||
"x": 539.7893218813452,
|
||||
"y": 522.5735931288071,
|
||||
}
|
||||
`;
|
||||
|
|
|
@@ -14,20 +14,23 @@ import {
import type { ExcalidrawTextElementWithContainer, FontString } from "./types";

describe("Test wrapText", () => {
  const font = "20px Cascadia, width: Segoe UI Emoji" as FontString;
  // font is irrelevant as jsdom does not support the FontFace API
  // `measureText` width is mocked to return `text.length` by `jest-canvas-mock`
  // https://github.com/hustcc/jest-canvas-mock/blob/master/src/classes/TextMetrics.js
  const font = "10px Cascadia, Segoe UI Emoji" as FontString;

  it("shouldn't add new lines for trailing spaces", () => {
    const text = "Hello whats up ";
    const maxWidth = 200 - BOUND_TEXT_PADDING * 2;
    const res = wrapText(text, font, maxWidth);
    expect(res).toBe(text);
  it("should wrap the text correctly when word length is exactly equal to max width", () => {
    const text = "Hello Excalidraw";
    // Length of "Excalidraw" is 100 and exactly equal to max width
    const res = wrapText(text, font, 100);
    expect(res).toEqual(`Hello\nExcalidraw`);
  });

  it("should work with emojis", () => {
    const text = "😀";
    const maxWidth = 1;
    const res = wrapText(text, font, maxWidth);
    expect(res).toBe("😀");
  it("should return the text as is if max width is invalid", () => {
    const text = "Hello Excalidraw";
    expect(wrapText(text, font, NaN)).toEqual(text);
    expect(wrapText(text, font, -1)).toEqual(text);
    expect(wrapText(text, font, Infinity)).toEqual(text);
  });

  it("should show the text correctly when max width reached", () => {
@ -37,6 +40,237 @@ describe("Test wrapText", () => {
|
|||
expect(res).toBe("H\ne\nl\nl\no\n😀");
|
||||
});
|
||||
|
||||
it("should not wrap number when wrapping line", () => {
|
||||
const text = "don't wrap this number 99,100.99";
|
||||
const maxWidth = 300;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe("don't wrap this number\n99,100.99");
|
||||
});
|
||||
|
||||
it("should support multiple (multi-codepoint) emojis", () => {
|
||||
const text = "😀🗺🔥👩🏽🦰👨👩👧👦🇨🇿";
|
||||
const maxWidth = 1;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe("😀\n🗺\n🔥\n👩🏽🦰\n👨👩👧👦\n🇨🇿");
|
||||
});
|
||||
|
||||
it("should wrap the text correctly when text contains hyphen", () => {
|
||||
let text =
|
||||
"Wikipedia is hosted by Wikimedia- Foundation, a non-profit organization that also hosts a range-of other projects";
|
||||
const res = wrapText(text, font, 110);
|
||||
expect(res).toBe(
|
||||
`Wikipedia\nis hosted\nby\nWikimedia-\nFoundation,\na non-\nprofit\norganizatio\nn that also\nhosts a\nrange-of\nother\nprojects`,
|
||||
);
|
||||
|
||||
text = "Hello thereusing-now";
|
||||
expect(wrapText(text, font, 100)).toEqual("Hello\nthereusing\n-now");
|
||||
});
|
||||
|
||||
it("should support wrapping nested lists", () => {
|
||||
const text = `\tA) one tab\t\t- two tabs - 8 spaces`;
|
||||
|
||||
const maxWidth = 100;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe(`\tA) one\ntab\t\t- two\ntabs\n- 8 spaces`);
|
||||
|
||||
const maxWidth2 = 50;
|
||||
const res2 = wrapText(text, font, maxWidth2);
|
||||
expect(res2).toBe(`\tA)\none\ntab\n- two\ntabs\n- 8\nspace\ns`);
|
||||
});
|
||||
|
||||
describe("When text is CJK", () => {
|
||||
it("should break each CJK character when width is very small", () => {
|
||||
// "안녕하세요" (Hangul) + "こんにちは世界" (Hiragana, Kanji) + "コンニチハ" (Katakana) + "你好" (Han) = "Hello Hello World Hello Hi"
|
||||
const text = "안녕하세요こんにちは世界コンニチハ你好";
|
||||
const maxWidth = 10;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe(
|
||||
"안\n녕\n하\n세\n요\nこ\nん\nに\nち\nは\n世\n界\nコ\nン\nニ\nチ\nハ\n你\n好",
|
||||
);
|
||||
});
|
||||
|
||||
it("should break CJK text into longer segments when width is larger", () => {
|
||||
// "안녕하세요" (Hangul) + "こんにちは世界" (Hiragana, Kanji) + "コンニチハ" (Katakana) + "你好" (Han) = "Hello Hello World Hello Hi"
|
||||
const text = "안녕하세요こんにちは世界コンニチハ你好";
|
||||
const maxWidth = 30;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
|
||||
// measureText is mocked, so it's not precisely what would happen in prod
|
||||
expect(res).toBe("안녕하\n세요こ\nんにち\nは世界\nコンニ\nチハ你\n好");
|
||||
});
|
||||
|
||||
it("should handle a combination of CJK, latin, emojis and whitespaces", () => {
|
||||
const text = `a醫 醫 bb 你好 world-i-😀🗺🔥`;
|
||||
|
||||
const maxWidth = 150;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe(`a醫 醫 bb 你\n好 world-i-😀🗺\n🔥`);
|
||||
|
||||
const maxWidth2 = 50;
|
||||
const res2 = wrapText(text, font, maxWidth2);
|
||||
expect(res2).toBe(`a醫 醫\nbb 你\n好\nworld\n-i-😀\n🗺🔥`);
|
||||
|
||||
const maxWidth3 = 30;
|
||||
const res3 = wrapText(text, font, maxWidth3);
|
||||
expect(res3).toBe(`a醫\n醫\nbb\n你好\nwor\nld-\ni-\n😀\n🗺\n🔥`);
|
||||
});
|
||||
|
||||
it("should break before and after a regular CJK character", () => {
|
||||
const text = "HelloたWorld";
|
||||
const maxWidth1 = 50;
|
||||
const res1 = wrapText(text, font, maxWidth1);
|
||||
expect(res1).toBe("Hello\nた\nWorld");
|
||||
|
||||
const maxWidth2 = 60;
|
||||
const res2 = wrapText(text, font, maxWidth2);
|
||||
expect(res2).toBe("Helloた\nWorld");
|
||||
});
|
||||
|
||||
it("should break before and after certain CJK symbols", () => {
|
||||
const text = "こんにちは〃世界";
|
||||
const maxWidth1 = 50;
|
||||
const res1 = wrapText(text, font, maxWidth1);
|
||||
expect(res1).toBe("こんにちは\n〃世界");
|
||||
|
||||
const maxWidth2 = 60;
|
||||
const res2 = wrapText(text, font, maxWidth2);
|
||||
expect(res2).toBe("こんにちは〃\n世界");
|
||||
});
|
||||
|
||||
it("should break after, not before for certain CJK pairs", () => {
|
||||
const text = "Hello た。";
|
||||
const maxWidth = 70;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe("Hello\nた。");
|
||||
});
|
||||
|
||||
it("should break before, not after for certain CJK pairs", () => {
|
||||
const text = "Hello「たWorld」";
|
||||
const maxWidth = 60;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe("Hello\n「た\nWorld」");
|
||||
});
|
||||
|
||||
it("should break after, not before for certain CJK character pairs", () => {
|
||||
const text = "「Helloた」World";
|
||||
const maxWidth = 70;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe("「Hello\nた」World");
|
||||
});
|
||||
|
||||
it("should break Chinese sentences", () => {
|
||||
const text = `中国你好!这是一个测试。
|
||||
我们来看看:人民币¥1234「很贵」
|
||||
(括号)、逗号,句号。空格 换行 全角符号…—`;
|
||||
|
||||
const maxWidth1 = 80;
|
||||
const res1 = wrapText(text, font, maxWidth1);
|
||||
expect(res1).toBe(`中国你好!这是一\n个测试。
|
||||
我们来看看:人民\n币¥1234「很\n贵」
|
||||
(括号)、逗号,\n句号。空格 换行\n全角符号…—`);
|
||||
|
||||
const maxWidth2 = 50;
|
||||
const res2 = wrapText(text, font, maxWidth2);
|
||||
expect(res2).toBe(`中国你好!\n这是一个测\n试。
|
||||
我们来看\n看:人民币\n¥1234\n「很贵」
|
||||
(括号)、\n逗号,句\n号。空格\n换行 全角\n符号…—`);
|
||||
});
|
||||
});
|
||||
|
||||
it("should break Japanese sentences", () => {
|
||||
const text = `日本こんにちは!これはテストです。
|
||||
見てみましょう:円¥1234「高い」
|
||||
(括弧)、読点、句点。
|
||||
空白 改行 全角記号…ー`;
|
||||
|
||||
const maxWidth1 = 80;
|
||||
const res1 = wrapText(text, font, maxWidth1);
|
||||
expect(res1).toBe(`日本こんにちは!\nこれはテストで\nす。
|
||||
見てみましょ\nう:円¥1234\n「高い」
|
||||
(括弧)、読\n点、句点。
|
||||
空白 改行\n全角記号…ー`);
|
||||
|
||||
const maxWidth2 = 50;
|
||||
const res2 = wrapText(text, font, maxWidth2);
|
||||
expect(res2).toBe(`日本こんに\nちは!これ\nはテストで\nす。
|
||||
見てみ\nましょう:\n円\n¥1234\n「高い」
|
||||
(括\n弧)、読\n点、句点。
|
||||
空白\n改行 全角\n記号…ー`);
|
||||
});
|
||||
|
||||
it("should break Korean sentences", () => {
|
||||
const text = `한국 안녕하세요! 이것은 테스트입니다.
|
||||
우리 보자: 원화₩1234「비싸다」
|
||||
(괄호), 쉼표, 마침표.
|
||||
공백 줄바꿈 전각기호…—`;
|
||||
|
||||
const maxWidth1 = 80;
|
||||
const res1 = wrapText(text, font, maxWidth1);
|
||||
expect(res1).toBe(`한국 안녕하세\n요! 이것은 테\n스트입니다.
|
||||
우리 보자: 원\n화₩1234「비\n싸다」
|
||||
(괄호), 쉼\n표, 마침표.
|
||||
공백 줄바꿈 전\n각기호…—`);
|
||||
|
||||
const maxWidth2 = 60;
|
||||
const res2 = wrapText(text, font, maxWidth2);
|
||||
expect(res2).toBe(`한국 안녕하\n세요! 이것\n은 테스트입\n니다.
|
||||
우리 보자:\n원화\n₩1234\n「비싸다」
|
||||
(괄호),\n쉼표, 마침\n표.
|
||||
공백 줄바꿈\n전각기호…—`);
|
||||
});
|
||||
|
||||
describe("When text contains leading whitespaces", () => {
|
||||
const text = " \t Hello world";
|
||||
|
||||
it("should preserve leading whitespaces", () => {
|
||||
const maxWidth = 120;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe(" \t Hello\nworld");
|
||||
});
|
||||
|
||||
it("should break and collapse leading whitespaces when line breaks", () => {
|
||||
const maxWidth = 60;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe("\nHello\nworld");
|
||||
});
|
||||
|
||||
it("should break and collapse leading whitespaces whe words break", () => {
|
||||
const maxWidth = 30;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe("\nHel\nlo\nwor\nld");
|
||||
});
|
||||
});
|
||||
|
||||
describe("When text contains trailing whitespaces", () => {
|
||||
it("shouldn't add new lines for trailing spaces", () => {
|
||||
const text = "Hello whats up ";
|
||||
const maxWidth = 200 - BOUND_TEXT_PADDING * 2;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe(text);
|
||||
});
|
||||
|
||||
it("should ignore trailing whitespaces when line breaks", () => {
|
||||
const text = "Hippopotomonstrosesquippedaliophobia ??????";
|
||||
const maxWidth = 400;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe("Hippopotomonstrosesquippedaliophobia\n??????");
|
||||
});
|
||||
|
||||
it("should not ignore trailing whitespaces when word breaks", () => {
|
||||
const text = "Hippopotomonstrosesquippedaliophobia ??????";
|
||||
const maxWidth = 300;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe("Hippopotomonstrosesquippedalio\nphobia ??????");
|
||||
});
|
||||
|
||||
it("should ignore trailing whitespaces when word breaks and line breaks", () => {
|
||||
const text = "Hippopotomonstrosesquippedaliophobia ??????";
|
||||
const maxWidth = 180;
|
||||
const res = wrapText(text, font, maxWidth);
|
||||
expect(res).toBe("Hippopotomonstrose\nsquippedaliophobia\n??????");
|
||||
});
|
||||
});
|
||||
|
||||
describe("When text doesn't contain new lines", () => {
|
||||
const text = "Hello whats up";
|
||||
|
||||
|
@ -44,7 +278,7 @@ describe("Test wrapText", () => {
|
|||
{
|
||||
desc: "break all words when width of each word is less than container width",
|
||||
width: 80,
|
||||
res: `Hello \nwhats \nup`,
|
||||
res: `Hello\nwhats\nup`,
|
||||
},
|
||||
{
|
||||
desc: "break all characters when width of each character is less than container width",
|
||||
|
@ -66,7 +300,7 @@ p`,
|
|||
desc: "break words as per the width",
|
||||
|
||||
width: 140,
|
||||
res: `Hello whats \nup`,
|
||||
res: `Hello whats\nup`,
|
||||
},
|
||||
{
|
||||
desc: "fit the container",
|
||||
|
@ -96,7 +330,7 @@ whats up`;
|
|||
{
|
||||
desc: "break all words when width of each word is less than container width",
|
||||
width: 80,
|
||||
res: `Hello\nwhats \nup`,
|
||||
res: `Hello\nwhats\nup`,
|
||||
},
|
||||
{
|
||||
desc: "break all characters when width of each character is less than container width",
|
||||
|
@ -142,26 +376,24 @@ whats up`,
|
|||
{
|
||||
desc: "fit characters of long string as per container width",
|
||||
width: 170,
|
||||
res: `hellolongtextth\nisiswhatsupwith\nyouIamtypingggg\ngandtypinggg \nbreak it now`,
|
||||
res: `hellolongtextthi\nsiswhatsupwithyo\nuIamtypingggggan\ndtypinggg break\nit now`,
|
||||
},
|
||||
|
||||
{
|
||||
desc: "fit characters of long string as per container width and break words as per the width",
|
||||
|
||||
width: 130,
|
||||
res: `hellolongte
|
||||
xtthisiswha
|
||||
tsupwithyou
|
||||
Iamtypinggg
|
||||
ggandtyping
|
||||
gg break it
|
||||
now`,
|
||||
res: `hellolongtex
|
||||
tthisiswhats
|
||||
upwithyouIam
|
||||
typingggggan
|
||||
dtypinggg
|
||||
break it now`,
|
||||
},
|
||||
{
|
||||
desc: "fit the long text when container width is greater than text length and move the rest to next line",
|
||||
|
||||
width: 600,
|
||||
res: `hellolongtextthisiswhatsupwithyouIamtypingggggandtypinggg \nbreak it now`,
|
||||
res: `hellolongtextthisiswhatsupwithyouIamtypingggggandtypinggg\nbreak it now`,
|
||||
},
|
||||
].forEach((data) => {
|
||||
it(`should ${data.desc}`, () => {
|
||||
|
@ -171,68 +403,243 @@ now`,
|
|||
});
|
||||
});
|
||||
|
||||
it("should wrap the text correctly when word length is exactly equal to max width", () => {
|
||||
const text = "Hello Excalidraw";
|
||||
// Length of "Excalidraw" is 100 and exacty equal to max width
|
||||
const res = wrapText(text, font, 100);
|
||||
expect(res).toEqual(`Hello \nExcalidraw`);
|
||||
});
|
||||
describe("Test parseTokens", () => {
|
||||
it("should tokenize latin", () => {
|
||||
let text = "Excalidraw is a virtual collaborative whiteboard";
|
||||
|
||||
it("should return the text as is if max width is invalid", () => {
|
||||
const text = "Hello Excalidraw";
|
||||
expect(wrapText(text, font, NaN)).toEqual(text);
|
||||
expect(wrapText(text, font, -1)).toEqual(text);
|
||||
expect(wrapText(text, font, Infinity)).toEqual(text);
|
||||
});
|
||||
expect(parseTokens(text)).toEqual([
|
||||
"Excalidraw",
|
||||
" ",
|
||||
"is",
|
||||
" ",
|
||||
"a",
|
||||
" ",
|
||||
"virtual",
|
||||
" ",
|
||||
"collaborative",
|
||||
" ",
|
||||
"whiteboard",
|
||||
]);
|
||||
|
||||
it("should wrap the text correctly when text contains hyphen", () => {
|
||||
let text =
|
||||
"Wikipedia is hosted by Wikimedia- Foundation, a non-profit organization that also hosts a range-of other projects";
|
||||
const res = wrapText(text, font, 110);
|
||||
expect(res).toBe(
|
||||
`Wikipedia \nis hosted \nby \nWikimedia-\nFoundation,\na non-\nprofit \norganizati\non that \nalso hosts\na range-of\nother \nprojects`,
|
||||
);
|
||||
text =
|
||||
"Wikipedia is hosted by Wikimedia- Foundation, a non-profit organization that also hosts a range-of other projects";
|
||||
expect(parseTokens(text)).toEqual([
|
||||
"Wikipedia",
|
||||
" ",
|
||||
"is",
|
||||
" ",
|
||||
"hosted",
|
||||
" ",
|
||||
"by",
|
||||
" ",
|
||||
"Wikimedia-",
|
||||
" ",
|
||||
"Foundation,",
|
||||
" ",
|
||||
"a",
|
||||
" ",
|
||||
"non-",
|
||||
"profit",
|
||||
" ",
|
||||
"organization",
|
||||
" ",
|
||||
"that",
|
||||
" ",
|
||||
"also",
|
||||
" ",
|
||||
"hosts",
|
||||
" ",
|
||||
"a",
|
||||
" ",
|
||||
"range-",
|
||||
"of",
|
||||
" ",
|
||||
"other",
|
||||
" ",
|
||||
"projects",
|
||||
]);
|
||||
});
|
||||
|
||||
text = "Hello thereusing-now";
|
||||
expect(wrapText(text, font, 100)).toEqual("Hello \nthereusin\ng-now");
|
||||
});
|
||||
});
|
||||
it("should not tokenize number", () => {
|
||||
const text = "99,100.99";
|
||||
const tokens = parseTokens(text);
|
||||
expect(tokens).toEqual(["99,100.99"]);
|
||||
});
|
||||
|
||||
describe("Test parseTokens", () => {
|
||||
it("should split into tokens correctly", () => {
|
||||
let text = "Excalidraw is a virtual collaborative whiteboard";
|
||||
expect(parseTokens(text)).toEqual([
|
||||
"Excalidraw",
|
||||
"is",
|
||||
"a",
|
||||
"virtual",
|
||||
"collaborative",
|
||||
"whiteboard",
|
||||
]);
|
||||
it("should tokenize joined emojis", () => {
|
||||
const text = `😬🌍🗺🔥☂️👩🏽🦰👨👩👧👦👩🏾🔬🏳️🌈🧔♀️🧑🤝🧑🙅🏽♂️✅0️⃣🇨🇿🦅`;
|
||||
const tokens = parseTokens(text);
|
||||
|
||||
text =
|
||||
"Wikipedia is hosted by Wikimedia- Foundation, a non-profit organization that also hosts a range-of other projects";
|
||||
expect(parseTokens(text)).toEqual([
|
||||
"Wikipedia",
|
||||
"is",
|
||||
"hosted",
|
||||
"by",
|
||||
"Wikimedia-",
|
||||
"",
|
||||
"Foundation,",
|
||||
"a",
|
||||
"non-",
|
||||
"profit",
|
||||
"organization",
|
||||
"that",
|
||||
"also",
|
||||
"hosts",
|
||||
"a",
|
||||
"range-",
|
||||
"of",
|
||||
"other",
|
||||
"projects",
|
||||
]);
|
||||
expect(tokens).toEqual([
|
||||
"😬",
|
||||
"🌍",
|
||||
"🗺",
|
||||
"🔥",
|
||||
"☂️",
|
||||
"👩🏽🦰",
|
||||
"👨👩👧👦",
|
||||
"👩🏾🔬",
|
||||
"🏳️🌈",
|
||||
"🧔♀️",
|
||||
"🧑🤝🧑",
|
||||
"🙅🏽♂️",
|
||||
"✅",
|
||||
"0️⃣",
|
||||
"🇨🇿",
|
||||
"🦅",
|
||||
]);
|
||||
});
|
||||
|
||||
it("should tokenize emojis mixed with mixed text", () => {
|
||||
const text = `😬a🌍b🗺c🔥d☂️《👩🏽🦰》👨👩👧👦德👩🏾🔬こ🏳️🌈안🧔♀️g🧑🤝🧑h🙅🏽♂️e✅f0️⃣g🇨🇿10🦅#hash`;
|
||||
const tokens = parseTokens(text);
|
||||
|
||||
expect(tokens).toEqual([
|
||||
"😬",
|
||||
"a",
|
||||
"🌍",
|
||||
"b",
|
||||
"🗺",
|
||||
"c",
|
||||
"🔥",
|
||||
"d",
|
||||
"☂️",
|
||||
"《",
|
||||
"👩🏽🦰",
|
||||
"》",
|
||||
"👨👩👧👦",
|
||||
"德",
|
||||
"👩🏾🔬",
|
||||
"こ",
|
||||
"🏳️🌈",
|
||||
"안",
|
||||
"🧔♀️",
|
||||
"g",
|
||||
"🧑🤝🧑",
|
||||
"h",
|
||||
"🙅🏽♂️",
|
||||
"e",
|
||||
"✅",
|
||||
"f0️⃣g", // bummer, but ok, as we traded kecaps not breaking (less common) for hash and numbers not breaking (more common)
|
||||
"🇨🇿",
|
||||
"10", // nice! do not break the number, as it's by default matched by \p{Emoji}
|
||||
"🦅",
|
||||
"#hash", // nice! do not break the hash, as it's by default matched by \p{Emoji}
|
||||
]);
|
||||
});
|
||||
|
||||
it("should tokenize decomposed chars into their composed variants", () => {
|
||||
// each input character is in a decomposed form
|
||||
const text = "čでäぴέ다й한";
|
||||
expect(text.normalize("NFC").length).toEqual(8);
|
||||
expect(text).toEqual(text.normalize("NFD"));
|
||||
|
||||
const tokens = parseTokens(text);
|
||||
expect(tokens.length).toEqual(8);
|
||||
expect(tokens).toEqual(["č", "で", "ä", "ぴ", "έ", "다", "й", "한"]);
|
||||
});
|
||||
|
||||
it("should tokenize artificial CJK", () => {
|
||||
const text = `《道德經》醫-醫こんにちは世界!안녕하세요세계;다.다...원/달(((다)))[[1]]〚({((한))>)〛た…[Hello] World?ニューヨーク・¥3700.55す。090-1234-5678¥1,000〜$5,000「素晴らしい!」〔重要〕#1:Taro君30%は、(たなばた)〰¥110±¥570で20℃〜9:30〜10:00【一番】`;
|
||||
|
||||
// [
|
||||
// '《道', '德', '經》', '醫-',
|
||||
// '醫', 'こ', 'ん', 'に',
|
||||
// 'ち', 'は', '世', '界!',
|
||||
// '안', '녕', '하', '세',
|
||||
// '요', '세', '계;', '다.',
|
||||
// '다...', '원/', '달', '(((다)))',
|
||||
// '[[1]]', '〚({((한))>)〛', 'た…', '[Hello]',
|
||||
// ' ', 'World?', 'ニ', 'ュ',
|
||||
// 'ー', 'ヨ', 'ー', 'ク・',
|
||||
// '¥3700.55', 'す。', '090-', '1234-',
|
||||
// '5678¥1,000', '〜', '$5,000', '「素',
|
||||
// '晴', 'ら', 'し', 'い!」',
|
||||
// '〔重', '要〕', '#', '1:',
|
||||
// 'Taro', '君', '30%', 'は、',
|
||||
// '(た', 'な', 'ば', 'た)',
|
||||
// '〰', '¥110±', '¥570', 'で',
|
||||
// '20℃', '〜', '9:30', '〜',
|
||||
// '10:00', '【一', '番】'
|
||||
// ]
|
||||
const tokens = parseTokens(text);
|
||||
|
||||
// Latin
|
||||
expect(tokens).toContain("[[1]]");
|
||||
expect(tokens).toContain("[Hello]");
|
||||
expect(tokens).toContain("World?");
|
||||
expect(tokens).toContain("Taro");
|
||||
|
||||
// Chinese
|
||||
expect(tokens).toContain("《道");
|
||||
expect(tokens).toContain("德");
|
||||
expect(tokens).toContain("經》");
|
||||
expect(tokens).toContain("醫-");
|
||||
expect(tokens).toContain("醫");
|
||||
|
||||
// Japanese
|
||||
expect(tokens).toContain("こ");
|
||||
expect(tokens).toContain("ん");
|
||||
expect(tokens).toContain("に");
|
||||
expect(tokens).toContain("ち");
|
||||
expect(tokens).toContain("は");
|
||||
expect(tokens).toContain("世");
|
||||
expect(tokens).toContain("ニ");
|
||||
expect(tokens).toContain("ク・");
|
||||
expect(tokens).toContain("界!");
|
||||
expect(tokens).toContain("た…");
|
||||
expect(tokens).toContain("す。");
|
||||
expect(tokens).toContain("ュ");
|
||||
expect(tokens).toContain("ー");
|
||||
expect(tokens).toContain("「素");
|
||||
expect(tokens).toContain("晴");
|
||||
expect(tokens).toContain("ら");
|
||||
expect(tokens).toContain("し");
|
||||
expect(tokens).toContain("い!」");
|
||||
expect(tokens).toContain("君");
|
||||
expect(tokens).toContain("は、");
|
||||
expect(tokens).toContain("(た");
|
||||
expect(tokens).toContain("な");
|
||||
expect(tokens).toContain("ば");
|
||||
expect(tokens).toContain("た)");
|
||||
expect(tokens).toContain("で");
|
||||
expect(tokens).toContain("【一");
|
||||
expect(tokens).toContain("番】");
|
||||
|
||||
// Check for Korean
|
||||
expect(tokens).toContain("안");
|
||||
expect(tokens).toContain("녕");
|
||||
expect(tokens).toContain("하");
|
||||
expect(tokens).toContain("세");
|
||||
expect(tokens).toContain("요");
|
||||
expect(tokens).toContain("세");
|
||||
expect(tokens).toContain("계;");
|
||||
expect(tokens).toContain("다.");
|
||||
expect(tokens).toContain("다...");
|
||||
expect(tokens).toContain("원/");
|
||||
expect(tokens).toContain("달");
|
||||
expect(tokens).toContain("(((다)))");
|
||||
expect(tokens).toContain("〚({((한))>)〛");
|
||||
|
||||
// Numbers and units
|
||||
expect(tokens).toContain("¥3700.55");
|
||||
expect(tokens).toContain("090-");
|
||||
expect(tokens).toContain("1234-");
|
||||
expect(tokens).toContain("5678¥1,000");
|
||||
expect(tokens).toContain("$5,000");
|
||||
expect(tokens).toContain("1:");
|
||||
expect(tokens).toContain("30%");
|
||||
expect(tokens).toContain("¥110±");
|
||||
expect(tokens).toContain("¥570");
|
||||
expect(tokens).toContain("20℃");
|
||||
expect(tokens).toContain("9:30");
|
||||
expect(tokens).toContain("10:00");
|
||||
|
||||
// Punctuation and symbols
|
||||
expect(tokens).toContain("〜");
|
||||
expect(tokens).toContain("〰");
|
||||
expect(tokens).toContain("#");
|
||||
});
|
||||
});
|
||||
});
|
||||
|
||||
|
|
|
@@ -16,6 +16,7 @@ import {
  BOUND_TEXT_PADDING,
  DEFAULT_FONT_FAMILY,
  DEFAULT_FONT_SIZE,
  ENV,
  TEXT_ALIGN,
  VERTICAL_ALIGN,
} from "../constants";
@@ -30,6 +31,172 @@ import {
} from "./containerCache";
import type { ExtractSetType } from "../utility-types";

/**
 * Matches various emoji types.
 *
 * 1. basic emojis (😀, 🌍)
 * 2. flags (🇨🇿)
 * 3. multi-codepoint emojis:
 *    - skin tones (👍🏽)
 *    - variation selectors (☂️)
 *    - keycaps (1️⃣)
 *    - tag sequences (🏴)
 *    - emoji sequences (👨👩👧👦, 👩🚀, 🏳️🌈)
 *
 * Unicode points:
 * - \uFE0F: presentation selector
 * - \u20E3: enclosing keycap
 * - \u200D: ZWJ (zero width joiner)
 * - \u{E0020}-\u{E007E}: tags
 * - \u{E007F}: cancel tag
 *
 * @see https://unicode.org/reports/tr51/#EBNF_and_Regex, with changes:
 * - replaced \p{Emoji} with [\p{Extended_Pictographic}\p{Emoji_Presentation}], see more in the `should tokenize emojis mixed with mixed text` test
 * - replaced \p{Emod} with \p{Emoji_Modifier} as some tools do not understand the abbreviation (i.e. https://devina.io/redos-checker)
 */
const _EMOJI_CHAR =
  /(\p{RI}\p{RI}|[\p{Extended_Pictographic}\p{Emoji_Presentation}](?:\p{Emoji_Modifier}|\uFE0F\u20E3?|[\u{E0020}-\u{E007E}]+\u{E007F})?(?:\u200D(?:\p{RI}\p{RI}|[\p{Emoji}](?:\p{Emoji_Modifier}|\uFE0F\u20E3?|[\u{E0020}-\u{E007E}]+\u{E007F})?))*)/u;
/**
 * Detects a CJK char, though it does not include every possible char used in CJK texts,
 * such as symbols and punctuation.
 *
 * By default every CJK char is a breaking point, though CJK has additional breaking points,
 * including full-width punctuation or symbols (Chinese and Japanese) and western punctuation (Korean).
 *
 * Additional CJK breaking point rules:
 * - expect a break before (lookahead), but not after (negative lookbehind), i.e. "(" or "("
 * - expect a break after (lookbehind), but not before (negative lookahead), i.e. ")" or ")"
 * - expect a break always (lookahead and lookbehind), i.e. "〃"
 */
const _CJK_CHAR =
  /\p{Script=Han}\p{Script=Hiragana}\p{Script=Katakana}\p{Script=Hangul}/u;

/**
 * The following characters break only with CJK, not with alphabetic characters.
 * This is essential for Korean, as it uses alphabetic punctuation, but expects CJK-like breaking points.
 *
 * Hello((た)) → ["Hello", "((た))"]
 * Hello((World)) → ["Hello((World))"]
 */
const _CJK_BREAK_NOT_AFTER_BUT_BEFORE = /<\(\[\{/u;
const _CJK_BREAK_NOT_BEFORE_BUT_AFTER = />\)\]\}.,:;\?!/u;
const _CJK_BREAK_ALWAYS = / 〃〜~〰#&*+-ー/=|¬ ̄¦/u;
const _CJK_SYMBOLS_AND_PUNCTUATION =
  /()[]{}〈〉《》⦅⦆「」「」『』【】〖〗〔〕〘〙〚〛<>〝〞'〟・。゚゙,、.:;?!%ー/u;

/**
 * The following characters break with any character, even though they are mostly used with CJK.
 *
 * Hello た。→ ["Hello", "た。"]
 *         ↑ DON'T BREAK "た。" (negative lookahead)
 * Hello「た」 World → ["Hello", "「た」", "World"]
 *       ↑ DON'T BREAK "「た" (negative lookbehind)
 *         ↑ DON'T BREAK "た」" (negative lookahead)
 *      ↑ BREAK BEFORE "「" (lookahead)
 *          ↑ BREAK AFTER "」" (lookbehind)
 */
const _ANY_BREAK_NOT_AFTER_BUT_BEFORE = /([{〈《⦅「「『【〖〔〘〚<〝/u;
const _ANY_BREAK_NOT_BEFORE_BUT_AFTER =
  /)]}〉》⦆」」』】〗〕〙〛>〞'〟・。゚゙,、.:;?!%±‥…\//u;

/**
 * Natural breaking points for any grammar.
 *
 * Hello-world
 *      ↑ BREAK AFTER "-" → ["Hello-", "world"]
 * Hello world
 *      ↑ BREAK ALWAYS " " → ["Hello", " ", "world"]
 */
const _ANY_BREAK_AFTER = /-/u;
const _ANY_BREAK_ALWAYS = /\s/u;
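Taken together, these character classes define where the tokenizer may break a line. A small illustrative sketch of the resulting behaviour, with expected outputs taken from the comments and tests elsewhere in this diff (widths are only meaningful under the mocked `measureText`, which returns `text.length`):

```ts
// `font` stands for the mocked test FontString used in text.test.ts.
// Break after a hyphen, always break around whitespace:
parseTokens("Hello-world");      // ["Hello-", "world"]
// CJK chars and emojis become their own tokens:
parseTokens("Hello 世界。🌎🗺");  // ["Hello", " ", "世", "界。", "🌎", "🗺"]

// "。" must not start a line, so it stays glued to "た":
wrapText("Hello た。", font, 70);        // "Hello\nた。"
// "「" must not end a line, so the break happens before it:
wrapText("Hello「たWorld」", font, 60);  // "Hello\n「た\nWorld」"
```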
|
||||
/**
|
||||
* Simple fallback for browsers (mainly Safari < 16.4) that don't support "Lookbehind assertion".
|
||||
*
|
||||
* Browser support as of 10/2024:
|
||||
* - 91% Lookbehind assertion https://caniuse.com/mdn-javascript_regular_expressions_lookbehind_assertion
|
||||
* - 94% Unicode character class escape https://caniuse.com/mdn-javascript_regular_expressions_unicode_character_class_escape
|
||||
*
|
||||
* Does not include advanced CJK breaking rules, but covers most of the core cases, especially for latin.
|
||||
*/
|
||||
const BREAK_LINE_REGEX_SIMPLE = new RegExp(
|
||||
`${_EMOJI_CHAR.source}|([${_ANY_BREAK_ALWAYS.source}${_CJK_CHAR.source}${_CJK_BREAK_ALWAYS.source}${_ANY_BREAK_AFTER.source}])`,
|
||||
"u",
|
||||
);
|
||||
|
||||
// Hello World → ["Hello", " World"]
|
||||
// ↑ BREAK BEFORE " "
|
||||
// HelloたWorld → ["Hello", "たWorld"]
|
||||
// ↑ BREAK BEFORE "た"
|
||||
// Hello「World」→ ["Hello", "「World」"]
|
||||
// ↑ BREAK BEFORE "「"
|
||||
const getLookaheadBreakingPoints = () => {
|
||||
const ANY_BREAKING_POINT = `(?<![${_ANY_BREAK_NOT_AFTER_BUT_BEFORE.source}])(?=[${_ANY_BREAK_NOT_AFTER_BUT_BEFORE.source}${_ANY_BREAK_ALWAYS.source}])`;
|
||||
const CJK_BREAKING_POINT = `(?<![${_ANY_BREAK_NOT_AFTER_BUT_BEFORE.source}${_CJK_BREAK_NOT_AFTER_BUT_BEFORE.source}])(?=[${_CJK_BREAK_NOT_AFTER_BUT_BEFORE.source}]*[${_CJK_CHAR.source}${_CJK_BREAK_ALWAYS.source}])`;
|
||||
return new RegExp(`(?:${ANY_BREAKING_POINT}|${CJK_BREAKING_POINT})`, "u");
|
||||
};
|
||||
|
||||
// Hello World → ["Hello ", "World"]
|
||||
// ↑ BREAK AFTER " "
|
||||
// Hello-World → ["Hello-", "World"]
|
||||
// ↑ BREAK AFTER "-"
|
||||
// HelloたWorld → ["Helloた", "World"]
|
||||
// ↑ BREAK AFTER "た"
|
||||
//「Hello」World → ["「Hello」", "World"]
|
||||
// ↑ BREAK AFTER "」"
|
||||
const getLookbehindBreakingPoints = () => {
|
||||
const ANY_BREAKING_POINT = `(?![${_ANY_BREAK_NOT_BEFORE_BUT_AFTER.source}])(?<=[${_ANY_BREAK_NOT_BEFORE_BUT_AFTER.source}${_ANY_BREAK_ALWAYS.source}${_ANY_BREAK_AFTER.source}])`;
|
||||
const CJK_BREAKING_POINT = `(?![${_ANY_BREAK_NOT_BEFORE_BUT_AFTER.source}${_CJK_BREAK_NOT_BEFORE_BUT_AFTER.source}${_ANY_BREAK_AFTER.source}])(?<=[${_CJK_CHAR.source}${_CJK_BREAK_ALWAYS.source}][${_CJK_BREAK_NOT_BEFORE_BUT_AFTER.source}]*)`;
|
||||
return new RegExp(`(?:${ANY_BREAKING_POINT}|${CJK_BREAKING_POINT})`, "u");
|
||||
};
|
||||
|
||||
/**
|
||||
* Break a line based on the whitespaces, CJK / emoji chars and language specific breaking points,
|
||||
* like hyphen for alphabetic and various full-width codepoints for CJK - especially Japanese, e.g.:
|
||||
*
|
||||
* "Hello 世界。🌎🗺" → ["Hello", " ", "世", "界。", "🌎", "🗺"]
|
||||
* "Hello-world" → ["Hello-", "world"]
|
||||
* "「Hello World」" → ["「Hello", " ", "World」"]
|
||||
*/
|
||||
const getBreakLineRegexAdvanced = () =>
|
||||
new RegExp(
|
||||
`${_EMOJI_CHAR.source}|${getLookaheadBreakingPoints().source}|${
|
||||
getLookbehindBreakingPoints().source
|
||||
}`,
|
||||
"u",
|
||||
);
|
||||
|
||||
let cachedBreakLineRegex: RegExp | undefined;
|
||||
|
||||
// Lazy-load for browsers that don't support "Lookbehind assertion"
|
||||
const getBreakLineRegex = () => {
|
||||
if (!cachedBreakLineRegex) {
|
||||
try {
|
||||
cachedBreakLineRegex = getBreakLineRegexAdvanced();
|
||||
} catch {
|
||||
cachedBreakLineRegex = BREAK_LINE_REGEX_SIMPLE;
|
||||
}
|
||||
}
|
||||
|
||||
return cachedBreakLineRegex;
|
||||
};
|
||||
|
||||
const CJK_REGEX = new RegExp(
|
||||
`[${_CJK_CHAR.source}${_CJK_BREAK_ALWAYS.source}${_CJK_SYMBOLS_AND_PUNCTUATION.source}]`,
|
||||
"u",
|
||||
);
|
||||
|
||||
const EMOJI_REGEX = new RegExp(`${_EMOJI_CHAR.source}`, "u");
|
||||
|
||||
export const containsCJK = (text: string) => {
|
||||
return CJK_REGEX.test(text);
|
||||
};
|
||||
|
||||
export const containsEmoji = (text: string) => {
|
||||
return EMOJI_REGEX.test(text);
|
||||
};
|
||||
|
||||
export const normalizeText = (text: string) => {
|
||||
return (
|
||||
normalizeEOL(text)
|
||||
|
@@ -408,22 +575,132 @@ export const getTextHeight = (
  return getLineHeightInPx(fontSize, lineHeight) * lineCount;
};

export const parseTokens = (text: string) => {
  // Splitting words containing "-" as those are treated as separate words
  // by css wrapping algorithm eg non-profit => non-, profit
  const words = text.split("-");
  if (words.length > 1) {
    // non-proft org => ['non-', 'profit org']
    words.forEach((word, index) => {
      if (index !== words.length - 1) {
        words[index] = word += "-";
      }
    });
export const parseTokens = (line: string) => {
  const breakLineRegex = getBreakLineRegex();

  // normalizing to single-codepoint composed chars due to canonical equivalence of multi-codepoint versions for chars like č, で (~ so that we don't break a line in between c and ˇ)
  // filtering due to multi-codepoint chars like 👨👩👧👦, 👩🏽🦰
  return line.normalize("NFC").split(breakLineRegex).filter(Boolean);
};

// handles multi-byte chars (é, 中) and purposefully does not handle multi-codepoint char (👨👩👧👦, 👩🏽🦰)
const isSingleCharacter = (maybeSingleCharacter: string) => {
  return (
    maybeSingleCharacter.codePointAt(0) !== undefined &&
    maybeSingleCharacter.codePointAt(1) === undefined
  );
};

const satisfiesWordInvariant = (word: string) => {
  if (import.meta.env.MODE === ENV.TEST || import.meta.env.DEV) {
    if (/\s/.test(word)) {
      throw new Error("Word should not contain any whitespaces!");
    }
  }
  // Joining the words with space and splitting them again with space to get the
  // final list of tokens
  // ['non-', 'profit org'] =>,'non- proft org' => ['non-','profit','org']
  return words.join(" ").split(" ");
};
const wrapWord = (
|
||||
word: string,
|
||||
font: FontString,
|
||||
maxWidth: number,
|
||||
): Array<string> => {
|
||||
// multi-codepoint emojis are already broken apart and shouldn't be broken further
|
||||
if (EMOJI_REGEX.test(word)) {
|
||||
return [word];
|
||||
}
|
||||
|
||||
satisfiesWordInvariant(word);
|
||||
|
||||
const lines: Array<string> = [];
|
||||
const chars = Array.from(word);
|
||||
|
||||
let currentLine = "";
|
||||
let currentLineWidth = 0;
|
||||
|
||||
for (const char of chars) {
|
||||
const _charWidth = charWidth.calculate(char, font);
|
||||
const testLineWidth = currentLineWidth + _charWidth;
|
||||
|
||||
if (testLineWidth <= maxWidth) {
|
||||
currentLine = currentLine + char;
|
||||
currentLineWidth = testLineWidth;
|
||||
continue;
|
||||
}
|
||||
|
||||
if (currentLine) {
|
||||
lines.push(currentLine);
|
||||
}
|
||||
|
||||
currentLine = char;
|
||||
currentLineWidth = _charWidth;
|
||||
}
|
||||
|
||||
if (currentLine) {
|
||||
lines.push(currentLine);
|
||||
}
|
||||
|
||||
return lines;
|
||||
};
|
||||
|
||||
const wrapLine = (
|
||||
line: string,
|
||||
font: FontString,
|
||||
maxWidth: number,
|
||||
): string[] => {
|
||||
const lines: Array<string> = [];
|
||||
const tokens = parseTokens(line);
|
||||
const tokenIterator = tokens[Symbol.iterator]();
|
||||
|
||||
let currentLine = "";
|
||||
let currentLineWidth = 0;
|
||||
|
||||
let iterator = tokenIterator.next();
|
||||
|
||||
while (!iterator.done) {
|
||||
const token = iterator.value;
|
||||
const testLine = currentLine + token;
|
||||
|
||||
// cache single codepoint whitespace, CJK or emoji width calc. as kerning should not apply here
|
||||
const testLineWidth = isSingleCharacter(token)
|
||||
? currentLineWidth + charWidth.calculate(token, font)
|
||||
: getLineWidth(testLine, font, true);
|
||||
|
||||
// build up the current line, skipping length check for possibly trailing whitespaces
|
||||
if (/\s/.test(token) || testLineWidth <= maxWidth) {
|
||||
currentLine = testLine;
|
||||
currentLineWidth = testLineWidth;
|
||||
iterator = tokenIterator.next();
|
||||
continue;
|
||||
}
|
||||
|
||||
// current line is empty => just the token (word) is longer than `maxWidth` and needs to be wrapped
|
||||
if (!currentLine) {
|
||||
const wrappedWord = wrapWord(token, font, maxWidth);
|
||||
const trailingLine = wrappedWord[wrappedWord.length - 1] ?? "";
|
||||
const precedingLines = wrappedWord.slice(0, -1);
|
||||
|
||||
lines.push(...precedingLines);
|
||||
|
||||
// trailing line of the wrapped word might still be joined with next token/s
|
||||
currentLine = trailingLine;
|
||||
currentLineWidth = getLineWidth(trailingLine, font, true);
|
||||
iterator = tokenIterator.next();
|
||||
} else {
|
||||
// push & reset, but don't iterate on the next token, as we didn't use it yet!
|
||||
lines.push(currentLine.trimEnd());
|
||||
|
||||
// purposefully not iterating and not setting `currentLine` to `token`, so that we could use a simple !currentLine check above
|
||||
currentLine = "";
|
||||
currentLineWidth = 0;
|
||||
}
|
||||
}
|
||||
|
||||
// iterator done, push the trailing line if exists
|
||||
if (currentLine) {
|
||||
lines.push(currentLine.trimEnd());
|
||||
}
|
||||
|
||||
return lines;
|
||||
};
|
||||
|
||||
export const wrapText = (
|
||||
|
@ -440,134 +717,17 @@ export const wrapText = (
|
|||
|
||||
const lines: Array<string> = [];
|
||||
const originalLines = text.split("\n");
|
||||
const spaceAdvanceWidth = getLineWidth(" ", font, true);
|
||||
|
||||
let currentLine = "";
|
||||
let currentLineWidthTillNow = 0;
|
||||
|
||||
const push = (str: string) => {
|
||||
if (str.trim()) {
|
||||
lines.push(str);
|
||||
}
|
||||
};
|
||||
|
||||
const resetParams = () => {
|
||||
currentLine = "";
|
||||
currentLineWidthTillNow = 0;
|
||||
};
|
||||
|
||||
for (const originalLine of originalLines) {
|
||||
const currentLineWidth = getLineWidth(originalLine, font, true);
|
||||
|
||||
// Push the line if its <= maxWidth
|
||||
if (currentLineWidth <= maxWidth) {
|
||||
lines.push(originalLine);
|
||||
continue;
|
||||
}
|
||||
|
||||
const words = parseTokens(originalLine);
|
||||
resetParams();
|
||||
|
||||
let index = 0;
|
||||
|
||||
while (index < words.length) {
|
||||
const currentWordWidth = getLineWidth(words[index], font, true);
|
||||
|
||||
// This will only happen when single word takes entire width
|
||||
if (currentWordWidth === maxWidth) {
|
||||
push(words[index]);
|
||||
index++;
|
||||
}
|
||||
|
||||
// Start breaking longer words exceeding max width
|
||||
else if (currentWordWidth > maxWidth) {
|
||||
// push current line since the current word exceeds the max width
|
||||
// so will be appended in next line
|
||||
push(currentLine);
|
||||
|
||||
resetParams();
|
||||
|
||||
while (words[index].length > 0) {
|
||||
const currentChar = String.fromCodePoint(
|
||||
words[index].codePointAt(0)!,
|
||||
);
|
||||
|
||||
const line = currentLine + currentChar;
|
||||
// use advance width instead of the actual width as it's closest to the browser wrapping algo
|
||||
// use width of the whole line instead of calculating individual chars to accommodate for kerning
|
||||
const lineAdvanceWidth = getLineWidth(line, font, true);
|
||||
const charAdvanceWidth = charWidth.calculate(currentChar, font);
|
||||
|
||||
currentLineWidthTillNow = lineAdvanceWidth;
|
||||
words[index] = words[index].slice(currentChar.length);
|
||||
|
||||
if (currentLineWidthTillNow >= maxWidth) {
|
||||
push(currentLine);
|
||||
currentLine = currentChar;
|
||||
currentLineWidthTillNow = charAdvanceWidth;
|
||||
} else {
|
||||
currentLine = line;
|
||||
}
|
||||
}
|
||||
// push current line if appending space exceeds max width
|
||||
if (currentLineWidthTillNow + spaceAdvanceWidth >= maxWidth) {
|
||||
push(currentLine);
|
||||
resetParams();
|
||||
// space needs to be appended before next word
|
||||
// as currentLine contains chars which couldn't be appended
|
||||
// to previous line unless the line ends with hyphen to sync
|
||||
// with css word-wrap
|
||||
} else if (!currentLine.endsWith("-")) {
|
||||
currentLine += " ";
|
||||
currentLineWidthTillNow += spaceAdvanceWidth;
|
||||
}
|
||||
index++;
|
||||
} else {
|
||||
// Start appending words in a line till max width reached
|
||||
while (currentLineWidthTillNow < maxWidth && index < words.length) {
|
||||
const word = words[index];
|
||||
currentLineWidthTillNow = getLineWidth(
|
||||
currentLine + word,
|
||||
font,
|
||||
true,
|
||||
);
|
||||
|
||||
if (currentLineWidthTillNow > maxWidth) {
|
||||
push(currentLine);
|
||||
resetParams();
|
||||
|
||||
break;
|
||||
}
|
||||
index++;
|
||||
|
||||
// if word ends with "-" then we don't need to add space
|
||||
// to sync with css word-wrap
|
||||
const shouldAppendSpace = !word.endsWith("-");
|
||||
currentLine += word;
|
||||
|
||||
if (shouldAppendSpace) {
|
||||
currentLine += " ";
|
||||
}
|
||||
|
||||
// Push the word if appending space exceeds max width
|
||||
if (currentLineWidthTillNow + spaceAdvanceWidth >= maxWidth) {
|
||||
if (shouldAppendSpace) {
|
||||
lines.push(currentLine.slice(0, -1));
|
||||
} else {
|
||||
lines.push(currentLine);
|
||||
}
|
||||
resetParams();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (currentLine.slice(-1) === " ") {
|
||||
// only remove last trailing space which we have added when joining words
|
||||
currentLine = currentLine.slice(0, -1);
|
||||
push(currentLine);
|
||||
}
|
||||
const wrappedLine = wrapLine(originalLine, font, maxWidth);
|
||||
lines.push(...wrappedLine);
|
||||
}
|
||||
|
||||
return lines.join("\n");
|
||||
|
@@ -577,24 +737,30 @@ export const charWidth = (() => {
  const cachedCharWidth: { [key: FontString]: Array<number> } = {};

  const calculate = (char: string, font: FontString) => {
    const ascii = char.charCodeAt(0);
    const unicode = char.charCodeAt(0);
    if (!cachedCharWidth[font]) {
      cachedCharWidth[font] = [];
    }
    if (!cachedCharWidth[font][ascii]) {
    if (!cachedCharWidth[font][unicode]) {
      const width = getLineWidth(char, font, true);
      cachedCharWidth[font][ascii] = width;
      cachedCharWidth[font][unicode] = width;
    }

    return cachedCharWidth[font][ascii];
    return cachedCharWidth[font][unicode];
  };

  const getCache = (font: FontString) => {
    return cachedCharWidth[font];
  };

  const clearCache = (font: FontString) => {
    cachedCharWidth[font] = [];
  };

  return {
    calculate,
    getCache,
    clearCache,
  };
})();
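The rename from `ascii` to `unicode` in the cache above is worth a note: `charCodeAt(0)` returns the first UTF-16 code unit, so non-ASCII characters such as CJK get their own cache slots too. A tiny illustration of the key values (standard JavaScript semantics, not part of the diff):

```ts
"a".charCodeAt(0);  // 97
"中".charCodeAt(0); // 20013, a valid array index for the per-font width cache
"😀".charCodeAt(0); // 55357, only the high surrogate of an astral character,
                    // so surrogate-pair characters share that key
```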
@@ -917,7 +917,7 @@ describe("textWysiwyg", () => {

      Keyboard.exitTextEditor(editor);
      text = h.elements[1] as ExcalidrawTextElementWithContainer;
      expect(text.text).toBe("Hello \nWorld!");
      expect(text.text).toBe("Hello\nWorld!");
      expect(text.originalText).toBe("Hello World!");
      expect(text.y).toBe(
        rectangle.y + h.elements[0].height / 2 - text.height / 2,
      );

@@ -1220,7 +1220,7 @@ describe("textWysiwyg", () => {
      );

      expect((h.elements[1] as ExcalidrawTextElementWithContainer).text).toBe(
        "Online \nwhitebo\nard \ncollabo\nration \nmade \neasy",
        "Online\nwhiteboa\nrd\ncollabor\nation\nmade\neasy",
      );
      fireEvent.contextMenu(GlobalTestState.interactiveCanvas, {
        button: 2,
@@ -36,3 +36,28 @@ export class ImageSceneDataError extends Error {
export class InvalidFractionalIndexError extends Error {
  public code = "ELEMENT_HAS_INVALID_INDEX" as const;
}

type WorkerErrorCodes = "WORKER_URL_NOT_DEFINED" | "WORKER_IN_THE_MAIN_CHUNK";

export class WorkerUrlNotDefinedError extends Error {
  public code;
  constructor(
    message = "Worker URL is not defined!",
    code: WorkerErrorCodes = "WORKER_URL_NOT_DEFINED",
  ) {
    super(message);
    this.name = "WorkerUrlNotDefinedError";
    this.code = code;
  }
}
export class WorkerInTheMainChunkError extends Error {
  public code;
  constructor(
    message = "Worker has to be in a separate chunk!",
    code: WorkerErrorCodes = "WORKER_IN_THE_MAIN_CHUNK",
  ) {
    super(message);
    this.name = "WorkerInTheMainChunkError";
    this.code = code;
  }
}
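These error classes back the changelog's "offload SVG export to a Web Worker, fall back to the main thread" behaviour. Below is a minimal sketch of that pattern, not the actual implementation in this PR; `workerUrl`, `payload`, and `runOnMainThread` are hypothetical stand-ins.

```ts
// Hedged sketch: try a module worker first, fall back to the main thread on any failure.
const runInWorkerWithFallback = async <T>(
  workerUrl: URL | undefined,
  payload: unknown,
  runOnMainThread: (payload: unknown) => Promise<T>,
): Promise<T> => {
  try {
    if (!workerUrl) {
      throw new WorkerUrlNotDefinedError();
    }

    const worker = new Worker(workerUrl, { type: "module" });

    return await new Promise<T>((resolve, reject) => {
      worker.onmessage = (event: MessageEvent<T>) => resolve(event.data);
      worker.onerror = (event) => reject(event);
      worker.postMessage(payload);
      // (a real implementation would also terminate or reuse the worker)
    });
  } catch (error) {
    // worker not supported, URL missing, or bundled into the main chunk
    console.warn(error);
    return runOnMainThread(payload);
  }
};
```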
@ -1,214 +0,0 @@
|
|||
import {
|
||||
base64ToArrayBuffer,
|
||||
stringToBase64,
|
||||
toByteString,
|
||||
} from "../data/encode";
|
||||
import { LOCAL_FONT_PROTOCOL } from "./metadata";
|
||||
import loadWoff2 from "./wasm/woff2.loader";
|
||||
import loadHbSubset from "./wasm/hb-subset.loader";
|
||||
|
||||
export interface Font {
|
||||
urls: URL[];
|
||||
fontFace: FontFace;
|
||||
getContent(codePoints: ReadonlySet<number>): Promise<string>;
|
||||
}
|
||||
export const UNPKG_FALLBACK_URL = `https://unpkg.com/${
|
||||
import.meta.env.VITE_PKG_NAME
|
||||
? `${import.meta.env.VITE_PKG_NAME}@${import.meta.env.PKG_VERSION}` // should be provided by vite during package build
|
||||
: "@excalidraw/excalidraw" // fallback to latest package version (i.e. for app)
|
||||
}/dist/prod/`;
|
||||
|
||||
export class ExcalidrawFont implements Font {
|
||||
public readonly urls: URL[];
|
||||
public readonly fontFace: FontFace;
|
||||
|
||||
constructor(family: string, uri: string, descriptors?: FontFaceDescriptors) {
|
||||
this.urls = ExcalidrawFont.createUrls(uri);
|
||||
|
||||
const sources = this.urls
|
||||
.map((url) => `url(${url}) ${ExcalidrawFont.getFormat(url)}`)
|
||||
.join(", ");
|
||||
|
||||
this.fontFace = new FontFace(family, sources, {
|
||||
display: "swap",
|
||||
style: "normal",
|
||||
weight: "400",
|
||||
...descriptors,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Tries to fetch woff2 content, based on the registered urls (from first to last, treated as fallbacks).
|
||||
*
|
||||
* NOTE: assumes usage of `dataurl` outside the browser environment
|
||||
*
|
||||
* @returns base64 with subsetted glyphs based on the passed codepoint, last defined url otherwise
|
||||
*/
|
||||
public async getContent(codePoints: ReadonlySet<number>): Promise<string> {
|
||||
let i = 0;
|
||||
const errorMessages = [];
|
||||
|
||||
while (i < this.urls.length) {
|
||||
const url = this.urls[i];
|
||||
|
||||
// it's a dataurl (server): the font is inlined as base64, no need to fetch
|
||||
if (url.protocol === "data:") {
|
||||
const arrayBuffer = base64ToArrayBuffer(url.toString().split(",")[1]);
|
||||
|
||||
const base64 = await ExcalidrawFont.subsetGlyphsByCodePoints(
|
||||
arrayBuffer,
|
||||
codePoints,
|
||||
);
|
||||
|
||||
return base64;
|
||||
}
|
||||
|
||||
try {
|
||||
const response = await fetch(url, {
|
||||
headers: {
|
||||
Accept: "font/woff2",
|
||||
},
|
||||
});
|
||||
|
||||
if (response.ok) {
|
||||
const arrayBuffer = await response.arrayBuffer();
|
||||
const base64 = await ExcalidrawFont.subsetGlyphsByCodePoints(
|
||||
arrayBuffer,
|
||||
codePoints,
|
||||
);
|
||||
|
||||
return base64;
|
||||
}
|
||||
|
||||
// response not ok, try to continue
|
||||
errorMessages.push(
|
||||
`"${url.toString()}" returned status "${response.status}"`,
|
||||
);
|
||||
} catch (e) {
|
||||
errorMessages.push(`"${url.toString()}" returned error "${e}"`);
|
||||
}
|
||||
|
||||
i++;
|
||||
}
|
||||
|
||||
console.error(
|
||||
`Failed to fetch font "${
|
||||
this.fontFace.family
|
||||
}" from urls "${this.urls.toString()}`,
|
||||
JSON.stringify(errorMessages, undefined, 2),
|
||||
);
|
||||
|
||||
// in case of issues, at least return the last url as the content
|
||||
// defaults to unpkg for bundled fonts (so that we don't have to host them forever) and http url for others
|
||||
return this.urls.length ? this.urls[this.urls.length - 1].toString() : "";
|
||||
}
|
||||
|
||||
/**
|
||||
* Tries to subset glyphs in a font based on the used codepoints, returning the font as a dataurl.
|
||||
*
|
||||
* @param arrayBuffer font data buffer, preferably in the woff2 format, though others should work as well
|
||||
* @param codePoints codepoints used to subset the glyphs
|
||||
*
|
||||
* @returns font with subsetted glyphs (all glyphs in case of errors) converted into a dataurl
|
||||
*/
|
||||
private static async subsetGlyphsByCodePoints(
|
||||
arrayBuffer: ArrayBuffer,
|
||||
codePoints: ReadonlySet<number>,
|
||||
): Promise<string> {
|
||||
try {
|
||||
// lazy loaded wasm modules to avoid multiple initializations in case of concurrent triggers
|
||||
const { compress, decompress } = await loadWoff2();
|
||||
const { subset } = await loadHbSubset();
|
||||
|
||||
const decompressedBinary = decompress(arrayBuffer).buffer;
|
||||
const subsetSnft = subset(decompressedBinary, codePoints);
|
||||
const compressedBinary = compress(subsetSnft.buffer);
|
||||
|
||||
return ExcalidrawFont.toBase64(compressedBinary.buffer);
|
||||
} catch (e) {
|
||||
console.error("Skipped glyph subsetting", e);
|
||||
// Fallback to encoding whole font in case of errors
|
||||
return ExcalidrawFont.toBase64(arrayBuffer);
|
||||
}
|
||||
}
|
||||
|
||||
private static async toBase64(arrayBuffer: ArrayBuffer) {
|
||||
let base64: string;
|
||||
|
||||
if (typeof Buffer !== "undefined") {
|
||||
// node + server-side
|
||||
base64 = Buffer.from(arrayBuffer).toString("base64");
|
||||
} else {
|
||||
base64 = await stringToBase64(await toByteString(arrayBuffer), true);
|
||||
}
|
||||
|
||||
return `data:font/woff2;base64,${base64}`;
|
||||
}
|
||||
|
||||
private static createUrls(uri: string): URL[] {
|
||||
if (uri.startsWith(LOCAL_FONT_PROTOCOL)) {
|
||||
// no url for local fonts
|
||||
return [];
|
||||
}
|
||||
|
||||
if (uri.startsWith("http") || uri.startsWith("data")) {
|
||||
// one url for http imports or data url
|
||||
return [new URL(uri)];
|
||||
}
|
||||
|
||||
// absolute asset paths, which are found in tests and the excalidraw-app build, won't work with a base url, so we strip the initial slash away
|
||||
const assetUrl: string = uri.replace(/^\/+/, "");
|
||||
const urls: URL[] = [];
|
||||
|
||||
if (typeof window.EXCALIDRAW_ASSET_PATH === "string") {
|
||||
const normalizedBaseUrl = this.normalizeBaseUrl(
|
||||
window.EXCALIDRAW_ASSET_PATH,
|
||||
);
|
||||
|
||||
urls.push(new URL(assetUrl, normalizedBaseUrl));
|
||||
} else if (Array.isArray(window.EXCALIDRAW_ASSET_PATH)) {
|
||||
window.EXCALIDRAW_ASSET_PATH.forEach((path) => {
|
||||
const normalizedBaseUrl = this.normalizeBaseUrl(path);
|
||||
urls.push(new URL(assetUrl, normalizedBaseUrl));
|
||||
});
|
||||
}
|
||||
|
||||
// fallback url for bundled fonts
|
||||
urls.push(new URL(assetUrl, UNPKG_FALLBACK_URL));
|
||||
|
||||
return urls;
|
||||
}
|
||||
|
||||
private static getFormat(url: URL) {
|
||||
try {
|
||||
const parts = new URL(url).pathname.split(".");
|
||||
|
||||
if (parts.length === 1) {
|
||||
return "";
|
||||
}
|
||||
|
||||
return `format('${parts.pop()}')`;
|
||||
} catch (error) {
|
||||
return "";
|
||||
}
|
||||
}
|
||||
|
||||
private static normalizeBaseUrl(baseUrl: string) {
|
||||
let result = baseUrl;
|
||||
|
||||
// in case user passed a root-relative url (~absolute path),
|
||||
// like "/" or "/some/path", or relative (starts with "./"),
|
||||
// prepend it with `location.origin`
|
||||
if (/^\.?\//.test(result)) {
|
||||
result = new URL(
|
||||
result.replace(/^\.?\/+/, ""),
|
||||
window?.location?.origin,
|
||||
).toString();
|
||||
}
|
||||
|
||||
// ensure there is a trailing slash, otherwise url won't be correctly concatenated
|
||||
result = `${result.replace(/\/+$/, "")}/`;
|
||||
|
||||
return result;
|
||||
}
|
||||
}
|
packages/excalidraw/fonts/ExcalidrawFontFace.ts (new file, 213 lines)
|
@ -0,0 +1,213 @@
|
|||
import { promiseTry } from "../utils";
|
||||
import { LOCAL_FONT_PROTOCOL } from "./metadata";
|
||||
import { subsetWoff2GlyphsByCodepoints } from "./subset/subset-main";
|
||||
|
||||
type DataURL = string;
|
||||
|
||||
export interface IExcalidrawFontFace {
|
||||
urls: URL[] | DataURL[];
|
||||
fontFace: FontFace;
|
||||
toCSS(
|
||||
characters: string,
|
||||
codePoints: Array<number>,
|
||||
): Promise<string> | undefined;
|
||||
}
|
||||
|
||||
export class ExcalidrawFontFace implements IExcalidrawFontFace {
|
||||
public readonly urls: URL[] | DataURL[];
|
||||
public readonly fontFace: FontFace;
|
||||
|
||||
private static readonly UNPKG_FALLBACK_URL = `https://unpkg.com/${
|
||||
import.meta.env.VITE_PKG_NAME
|
||||
? `${import.meta.env.VITE_PKG_NAME}@${import.meta.env.PKG_VERSION}` // should be provided by vite during package build
|
||||
: "@excalidraw/excalidraw" // fallback to latest package version (i.e. for app)
|
||||
}/dist/prod/`;
|
||||
|
||||
constructor(family: string, uri: string, descriptors?: FontFaceDescriptors) {
|
||||
this.urls = ExcalidrawFontFace.createUrls(uri);
|
||||
|
||||
const sources = this.urls
|
||||
.map((url) => `url(${url}) ${ExcalidrawFontFace.getFormat(url)}`)
|
||||
.join(", ");
|
||||
|
||||
this.fontFace = new FontFace(family, sources, {
|
||||
display: "swap",
|
||||
style: "normal",
|
||||
weight: "400",
|
||||
...descriptors,
|
||||
});
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates CSS `@font-face` definition with the (subsetted) font source as a data url for the characters within the unicode range.
|
||||
*
|
||||
* Returns `undefined` otherwise.
|
||||
*/
|
||||
public toCSS(
|
||||
characters: string,
|
||||
codePoints: Array<number>,
|
||||
): Promise<string> | undefined {
|
||||
// quick exit in case the characters are not within this font face's unicode range
|
||||
if (!this.getUnicodeRangeRegex().test(characters)) {
|
||||
return;
|
||||
}
|
||||
|
||||
return this.getContent(codePoints).then(
|
||||
(content) =>
|
||||
`@font-face { font-family: ${this.fontFace.family}; src: url(${content}); }`,
|
||||
);
|
||||
}
|
||||
|
||||
/**
|
||||
* Tries to fetch woff2 content, based on the registered urls (from first to last, treated as fallbacks).
|
||||
*
|
||||
* @returns base64 with subsetted glyphs based on the passed codepoint, last defined url otherwise
|
||||
*/
|
||||
public async getContent(codePoints: Array<number>): Promise<string> {
|
||||
let i = 0;
|
||||
const errorMessages = [];
|
||||
|
||||
while (i < this.urls.length) {
|
||||
const url = this.urls[i];
|
||||
|
||||
try {
|
||||
const arrayBuffer = await this.fetchFont(url);
|
||||
const base64 = await subsetWoff2GlyphsByCodepoints(
|
||||
arrayBuffer,
|
||||
codePoints,
|
||||
);
|
||||
|
||||
return base64;
|
||||
} catch (e) {
|
||||
errorMessages.push(`"${url.toString()}" returned error "${e}"`);
|
||||
}
|
||||
|
||||
i++;
|
||||
}
|
||||
|
||||
console.error(
|
||||
`Failed to fetch font family "${this.fontFace.family}"`,
|
||||
JSON.stringify(errorMessages, undefined, 2),
|
||||
);
|
||||
|
||||
// in case of issues, at least return the last url as the content
|
||||
// defaults to unpkg for bundled fonts (so that we don't have to host them forever) and http url for others
|
||||
return this.urls.length ? this.urls[this.urls.length - 1].toString() : "";
|
||||
}
|
||||
|
||||
public fetchFont(url: URL | DataURL): Promise<ArrayBuffer> {
|
||||
return promiseTry(async () => {
|
||||
const response = await fetch(url, {
|
||||
headers: {
|
||||
Accept: "font/woff2",
|
||||
},
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const urlString = url instanceof URL ? url.toString() : "dataurl";
|
||||
throw new Error(
|
||||
`Failed to fetch "${urlString}": ${response.statusText}`,
|
||||
);
|
||||
}
|
||||
|
||||
const arrayBuffer = await response.arrayBuffer();
|
||||
return arrayBuffer;
|
||||
});
|
||||
}
|
||||
|
||||
private getUnicodeRangeRegex() {
|
||||
// using \u{h} or \u{hhhhh} to match any number of hex digits,
|
||||
// otherwise we would get an "Invalid Unicode escape" error
|
||||
// e.g. U+0-1007F -> \u{0}-\u{1007F}
|
||||
const unicodeRangeRegex = this.fontFace.unicodeRange
|
||||
.split(/,\s*/)
|
||||
.map((range) => {
|
||||
const [start, end] = range.replace("U+", "").split("-");
|
||||
if (end) {
|
||||
return `\\u{${start}}-\\u{${end}}`;
|
||||
}
|
||||
|
||||
return `\\u{${start}}`;
|
||||
})
|
||||
.join("");
|
||||
|
||||
return new RegExp(`[${unicodeRangeRegex}]`, "u");
|
||||
}
|
||||
|
||||
private static createUrls(uri: string): URL[] | DataURL[] {
|
||||
if (uri.startsWith("data")) {
|
||||
// don't create the URL instance, as parsing the huge dataurl string is expensive
|
||||
return [uri];
|
||||
}
|
||||
|
||||
if (uri.startsWith(LOCAL_FONT_PROTOCOL)) {
|
||||
// no url for local fonts
|
||||
return [];
|
||||
}
|
||||
|
||||
if (uri.startsWith("http")) {
|
||||
// one url for http imports or data url
|
||||
return [new URL(uri)];
|
||||
}
|
||||
|
||||
// absolute asset paths, which are found in tests and the excalidraw-app build, won't work with a base url, so we strip the initial slash away
|
||||
const assetUrl: string = uri.replace(/^\/+/, "");
|
||||
const urls: URL[] = [];
|
||||
|
||||
if (typeof window.EXCALIDRAW_ASSET_PATH === "string") {
|
||||
const normalizedBaseUrl = this.normalizeBaseUrl(
|
||||
window.EXCALIDRAW_ASSET_PATH,
|
||||
);
|
||||
|
||||
urls.push(new URL(assetUrl, normalizedBaseUrl));
|
||||
} else if (Array.isArray(window.EXCALIDRAW_ASSET_PATH)) {
|
||||
window.EXCALIDRAW_ASSET_PATH.forEach((path) => {
|
||||
const normalizedBaseUrl = this.normalizeBaseUrl(path);
|
||||
urls.push(new URL(assetUrl, normalizedBaseUrl));
|
||||
});
|
||||
}
|
||||
|
||||
// fallback url for bundled fonts
|
||||
urls.push(new URL(assetUrl, ExcalidrawFontFace.UNPKG_FALLBACK_URL));
|
||||
|
||||
return urls;
|
||||
}
|
||||
|
||||
private static getFormat(url: URL | DataURL) {
|
||||
if (!(url instanceof URL)) {
|
||||
// format is irrelevant for data url
|
||||
return "";
|
||||
}
|
||||
|
||||
try {
|
||||
const parts = new URL(url).pathname.split(".");
|
||||
|
||||
if (parts.length === 1) {
|
||||
return "";
|
||||
}
|
||||
|
||||
return `format('${parts.pop()}')`;
|
||||
} catch (error) {
|
||||
return "";
|
||||
}
|
||||
}
|
||||
|
||||
private static normalizeBaseUrl(baseUrl: string) {
|
||||
let result = baseUrl;
|
||||
|
||||
// in case user passed a root-relative url (~absolute path),
|
||||
// like "/" or "/some/path", or relative (starts with "./"),
|
||||
// prepend it with `location.origin`
|
||||
if (/^\.?\//.test(result)) {
|
||||
result = new URL(
|
||||
result.replace(/^\.?\/+/, ""),
|
||||
window?.location?.origin,
|
||||
).toString();
|
||||
}
|
||||
|
||||
// ensure there is a trailing slash, otherwise url won't be correctly concatenated
|
||||
result = `${result.replace(/\/+$/, "")}/`;
|
||||
|
||||
return result;
|
||||
}
|
||||
}
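For clarity, a standalone re-statement (not the class itself) of the unicode-range to RegExp conversion performed by `getUnicodeRangeRegex` above, exercised with an assumed CJK-ish range:

// mirrors the conversion above: "U+xxxx-yyyy" ranges become \u{xxxx}-\u{yyyy} character classes
const rangeToRegex = (unicodeRange: string): RegExp => {
  const body = unicodeRange
    .split(/,\s*/)
    .map((range) => {
      const [start, end] = range.replace("U+", "").split("-");
      return end ? `\\u{${start}}-\\u{${end}}` : `\\u{${start}}`;
    })
    .join("");
  return new RegExp(`[${body}]`, "u");
};

const cjk = rangeToRegex("U+4E00-9FFF, U+3040-30FF"); // example range, assumed for illustration
console.log(cjk.test("你"), cjk.test("a")); // true false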
|
|
@ -4,7 +4,7 @@
|
|||
|
||||
@font-face {
|
||||
font-family: "Assistant";
|
||||
src: url(./Assistant-Regular.woff2) format("woff2");
|
||||
src: url(../woff2/Assistant/Assistant-Regular.woff2) format("woff2");
|
||||
font-weight: 400;
|
||||
style: normal;
|
||||
display: swap;
|
||||
|
@ -12,7 +12,7 @@
|
|||
|
||||
@font-face {
|
||||
font-family: "Assistant";
|
||||
src: url(./Assistant-Medium.woff2) format("woff2");
|
||||
src: url(../woff2/Assistant/Assistant-Medium.woff2) format("woff2");
|
||||
font-weight: 500;
|
||||
style: normal;
|
||||
display: swap;
|
||||
|
@ -20,7 +20,7 @@
|
|||
|
||||
@font-face {
|
||||
font-family: "Assistant";
|
||||
src: url(./Assistant-SemiBold.woff2) format("woff2");
|
||||
src: url(../woff2/Assistant/Assistant-SemiBold.woff2) format("woff2");
|
||||
font-weight: 600;
|
||||
style: normal;
|
||||
display: swap;
|
||||
|
@ -28,7 +28,7 @@
|
|||
|
||||
@font-face {
|
||||
font-family: "Assistant";
|
||||
src: url(./Assistant-Bold.woff2) format("woff2");
|
||||
src: url(../woff2/Assistant/Assistant-Bold.woff2) format("woff2");
|
||||
font-weight: 700;
|
||||
style: normal;
|
||||
display: swap;
|
|
@ -8,30 +8,28 @@ import type {
|
|||
import { ShapeCache } from "../scene/ShapeCache";
|
||||
import { isTextElement } from "../element";
|
||||
import { getFontString } from "../utils";
|
||||
import { FONT_FAMILY } from "../constants";
|
||||
import {
|
||||
LOCAL_FONT_PROTOCOL,
|
||||
FONT_METADATA,
|
||||
RANGES,
|
||||
type FontMetadata,
|
||||
} from "./metadata";
|
||||
import { ExcalidrawFont, type Font } from "./ExcalidrawFont";
|
||||
import { getContainerElement } from "../element/textElement";
|
||||
|
||||
import Virgil from "./assets/Virgil-Regular.woff2";
|
||||
import Excalifont from "./assets/Excalifont-Regular.woff2";
|
||||
import Cascadia from "./assets/CascadiaCode-Regular.woff2";
|
||||
import ComicShanns from "./assets/ComicShanns-Regular.woff2";
|
||||
import LiberationSans from "./assets/LiberationSans-Regular.woff2";
|
||||
|
||||
import LilitaLatin from "./assets/Lilita-Regular-i7dPIFZ9Zz-WBtRtedDbYEF8RXi4EwQ.woff2";
|
||||
import LilitaLatinExt from "./assets/Lilita-Regular-i7dPIFZ9Zz-WBtRtedDbYE98RXi4EwSsbg.woff2";
|
||||
|
||||
import NunitoLatin from "./assets/Nunito-Regular-XRXI3I6Li01BKofiOc5wtlZ2di8HDIkhdTQ3j6zbXWjgeg.woff2";
|
||||
import NunitoLatinExt from "./assets/Nunito-Regular-XRXI3I6Li01BKofiOc5wtlZ2di8HDIkhdTo3j6zbXWjgevT5.woff2";
|
||||
import NunitoCyrilic from "./assets/Nunito-Regular-XRXI3I6Li01BKofiOc5wtlZ2di8HDIkhdTA3j6zbXWjgevT5.woff2";
|
||||
import NunitoCyrilicExt from "./assets/Nunito-Regular-XRXI3I6Li01BKofiOc5wtlZ2di8HDIkhdTk3j6zbXWjgevT5.woff2";
|
||||
import NunitoVietnamese from "./assets/Nunito-Regular-XRXI3I6Li01BKofiOc5wtlZ2di8HDIkhdTs3j6zbXWjgevT5.woff2";
|
||||
FONT_FAMILY,
|
||||
FONT_FAMILY_FALLBACKS,
|
||||
WINDOWS_EMOJI_FALLBACK_FONT,
|
||||
CJK_HAND_DRAWN_FALLBACK_FONT,
|
||||
} from "../constants";
|
||||
import { FONT_METADATA, type FontMetadata } from "./metadata";
|
||||
import { charWidth, getContainerElement } from "../element/textElement";
|
||||
import {
|
||||
ExcalidrawFontFace,
|
||||
type IExcalidrawFontFace,
|
||||
} from "./ExcalidrawFontFace";
|
||||
import { CascadiaFontFaces } from "./woff2/Cascadia";
|
||||
import { ComicFontFaces } from "./woff2/Comic";
|
||||
import { ExcalifontFontFaces } from "./woff2/Excalifont";
|
||||
import { HelveticaFontFaces } from "./woff2/Helvetica";
|
||||
import { LiberationFontFaces } from "./woff2/Liberation";
|
||||
import { LilitaFontFaces } from "./woff2/Lilita";
|
||||
import { NunitoFontFaces } from "./woff2/Nunito";
|
||||
import { VirgilFontFaces } from "./woff2/Virgil";
|
||||
import { XiaolaiFontFaces } from "./woff2/Xiaolai";
|
||||
import { EmojiFontFaces } from "./woff2/Emoji";
|
||||
|
||||
export class Fonts {
|
||||
// it's ok to track fonts across multiple instances only once, so let's use
|
||||
|
@ -43,7 +41,7 @@ export class Fonts {
|
|||
number,
|
||||
{
|
||||
metadata: FontMetadata;
|
||||
fonts: Font[];
|
||||
fontFaces: IExcalidrawFontFace[];
|
||||
}
|
||||
>
|
||||
| undefined;
|
||||
|
@ -85,20 +83,23 @@ export class Fonts {
|
|||
* of the supplied fontFaces has not already been processed.
|
||||
*/
|
||||
public onLoaded = (fontFaces: readonly FontFace[]) => {
|
||||
if (
|
||||
// bail if all fonts have been processed. We're checking just a
|
||||
// subset of the font properties (though it should be enough), so it
|
||||
// can technically bail on a false positive.
|
||||
fontFaces.every((fontFace) => {
|
||||
const sig = `${fontFace.family}-${fontFace.style}-${fontFace.weight}-${fontFace.unicodeRange}`;
|
||||
if (Fonts.loadedFontsCache.has(sig)) {
|
||||
return true;
|
||||
}
|
||||
// bail if all fonts have been processed. We're checking just a
|
||||
// subset of the font properties (though it should be enough), so it
|
||||
// can technically bail on a false positive.
|
||||
let shouldBail = true;
|
||||
|
||||
for (const fontFace of fontFaces) {
|
||||
const sig = `${fontFace.family}-${fontFace.style}-${fontFace.weight}-${fontFace.unicodeRange}`;
|
||||
|
||||
// make sure to update our cache with all the loaded font faces
|
||||
if (!Fonts.loadedFontsCache.has(sig)) {
|
||||
Fonts.loadedFontsCache.add(sig);
|
||||
return false;
|
||||
})
|
||||
) {
|
||||
return false;
|
||||
shouldBail = false;
|
||||
}
|
||||
}
|
||||
|
||||
if (shouldBail) {
|
||||
return;
|
||||
}
|
||||
|
||||
let didUpdate = false;
|
||||
|
@ -109,6 +110,10 @@ export class Fonts {
|
|||
if (isTextElement(element)) {
|
||||
didUpdate = true;
|
||||
ShapeCache.delete(element);
|
||||
|
||||
// clear the width cache, so that we don't perform subsequent wrapping based on the stale fallback font metrics
|
||||
charWidth.clearCache(getFontString(element));
|
||||
|
||||
const container = getContainerElement(element, elementsMap);
|
||||
if (container) {
|
||||
ShapeCache.delete(container);
|
||||
|
@ -125,26 +130,27 @@ export class Fonts {
|
|||
* Load font faces for a given scene and trigger scene update.
|
||||
*/
|
||||
public loadSceneFonts = async (): Promise<FontFace[]> => {
|
||||
const sceneFamilies = this.getSceneFontFamilies();
|
||||
const sceneFamilies = this.getSceneFamilies();
|
||||
const loaded = await Fonts.loadFontFaces(sceneFamilies);
|
||||
this.onLoaded(loaded);
|
||||
return loaded;
|
||||
};
|
||||
|
||||
/**
|
||||
* Gets all the font families for the given scene.
|
||||
* Load all registered font faces.
|
||||
*/
|
||||
public getSceneFontFamilies = () => {
|
||||
return Fonts.getFontFamilies(this.scene.getNonDeletedElements());
|
||||
public static loadAllFonts = async (): Promise<FontFace[]> => {
|
||||
const allFamilies = Fonts.getAllFamilies();
|
||||
return Fonts.loadFontFaces(allFamilies);
|
||||
};
|
||||
|
||||
/**
|
||||
* Load font faces for passed elements - use when the scene is unavailable (i.e. export).
|
||||
*/
|
||||
public static loadFontsForElements = async (
|
||||
public static loadElementsFonts = async (
|
||||
elements: readonly ExcalidrawElement[],
|
||||
): Promise<FontFace[]> => {
|
||||
const fontFamilies = Fonts.getFontFamilies(elements);
|
||||
const fontFamilies = Fonts.getElementsFamilies(elements);
|
||||
return await Fonts.loadFontFaces(fontFamilies);
|
||||
};
|
||||
|
||||
|
@ -152,13 +158,13 @@ export class Fonts {
|
|||
fontFamilies: Array<ExcalidrawTextElement["fontFamily"]>,
|
||||
) {
|
||||
// add all registered font faces into the `document.fonts` (if not added already)
|
||||
for (const { fonts, metadata } of Fonts.registered.values()) {
|
||||
for (const { fontFaces, metadata } of Fonts.registered.values()) {
|
||||
// skip registering font faces for local fonts (i.e. Helvetica)
|
||||
if (metadata.local) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for (const { fontFace } of fonts) {
|
||||
for (const { fontFace } of fontFaces) {
|
||||
if (!window.document.fonts.has(fontFace)) {
|
||||
window.document.fonts.add(fontFace);
|
||||
}
|
||||
|
@ -183,7 +189,7 @@ export class Fonts {
|
|||
console.error(
|
||||
`Failed to load font "${fontString}" from urls "${Fonts.registered
|
||||
.get(fontFamily)
|
||||
?.fonts.map((x) => x.urls)}"`,
|
||||
?.fontFaces.map((x) => x.urls)}"`,
|
||||
e,
|
||||
);
|
||||
}
|
||||
|
@ -202,82 +208,58 @@ export class Fonts {
|
|||
private static init() {
|
||||
const fonts = {
|
||||
registered: new Map<
|
||||
ValueOf<typeof FONT_FAMILY>,
|
||||
{ metadata: FontMetadata; fonts: Font[] }
|
||||
ValueOf<typeof FONT_FAMILY | typeof FONT_FAMILY_FALLBACKS>,
|
||||
{ metadata: FontMetadata; fontFaces: IExcalidrawFontFace[] }
|
||||
>(),
|
||||
};
|
||||
|
||||
// TODO: let's tweak this once we know how `register` will be exposed as part of the custom fonts API
|
||||
const _register = register.bind(fonts);
|
||||
const init = (
|
||||
family: keyof typeof FONT_FAMILY | keyof typeof FONT_FAMILY_FALLBACKS,
|
||||
...fontFacesDescriptors: ExcalidrawFontFaceDescriptor[]
|
||||
) => {
|
||||
const fontFamily =
|
||||
FONT_FAMILY[family as keyof typeof FONT_FAMILY] ??
|
||||
FONT_FAMILY_FALLBACKS[family as keyof typeof FONT_FAMILY_FALLBACKS];
|
||||
|
||||
_register("Virgil", FONT_METADATA[FONT_FAMILY.Virgil], {
|
||||
uri: Virgil,
|
||||
});
|
||||
// default to Excalifont metrics
|
||||
const metadata =
|
||||
FONT_METADATA[fontFamily] ?? FONT_METADATA[FONT_FAMILY.Excalifont];
|
||||
|
||||
_register("Excalifont", FONT_METADATA[FONT_FAMILY.Excalifont], {
|
||||
uri: Excalifont,
|
||||
});
|
||||
register.call(fonts, family, metadata, ...fontFacesDescriptors);
|
||||
};
|
||||
|
||||
init("Cascadia", ...CascadiaFontFaces);
|
||||
init("Comic Shanns", ...ComicFontFaces);
|
||||
init("Excalifont", ...ExcalifontFontFaces);
|
||||
// keeping for backwards compatibility reasons, uses system font (Helvetica on MacOS, Arial on Win)
|
||||
_register("Helvetica", FONT_METADATA[FONT_FAMILY.Helvetica], {
|
||||
uri: LOCAL_FONT_PROTOCOL,
|
||||
});
|
||||
|
||||
init("Helvetica", ...HelveticaFontFaces);
|
||||
// used for server-side pdf & png export instead of helvetica (technically does not need metrics, but kept in for consistency)
|
||||
_register(
|
||||
"Liberation Sans",
|
||||
FONT_METADATA[FONT_FAMILY["Liberation Sans"]],
|
||||
{
|
||||
uri: LiberationSans,
|
||||
},
|
||||
);
|
||||
init("Liberation Sans", ...LiberationFontFaces);
|
||||
init("Lilita One", ...LilitaFontFaces);
|
||||
init("Nunito", ...NunitoFontFaces);
|
||||
init("Virgil", ...VirgilFontFaces);
|
||||
|
||||
_register("Cascadia", FONT_METADATA[FONT_FAMILY.Cascadia], {
|
||||
uri: Cascadia,
|
||||
});
|
||||
|
||||
_register("Comic Shanns", FONT_METADATA[FONT_FAMILY["Comic Shanns"]], {
|
||||
uri: ComicShanns,
|
||||
});
|
||||
|
||||
_register(
|
||||
"Lilita One",
|
||||
FONT_METADATA[FONT_FAMILY["Lilita One"]],
|
||||
{ uri: LilitaLatinExt, descriptors: { unicodeRange: RANGES.LATIN_EXT } },
|
||||
{ uri: LilitaLatin, descriptors: { unicodeRange: RANGES.LATIN } },
|
||||
);
|
||||
|
||||
_register(
|
||||
"Nunito",
|
||||
FONT_METADATA[FONT_FAMILY.Nunito],
|
||||
{
|
||||
uri: NunitoCyrilicExt,
|
||||
descriptors: { unicodeRange: RANGES.CYRILIC_EXT, weight: "500" },
|
||||
},
|
||||
{
|
||||
uri: NunitoCyrilic,
|
||||
descriptors: { unicodeRange: RANGES.CYRILIC, weight: "500" },
|
||||
},
|
||||
{
|
||||
uri: NunitoVietnamese,
|
||||
descriptors: { unicodeRange: RANGES.VIETNAMESE, weight: "500" },
|
||||
},
|
||||
{
|
||||
uri: NunitoLatinExt,
|
||||
descriptors: { unicodeRange: RANGES.LATIN_EXT, weight: "500" },
|
||||
},
|
||||
{
|
||||
uri: NunitoLatin,
|
||||
descriptors: { unicodeRange: RANGES.LATIN, weight: "500" },
|
||||
},
|
||||
);
|
||||
// fallback font faces
|
||||
init(CJK_HAND_DRAWN_FALLBACK_FONT, ...XiaolaiFontFaces);
|
||||
init(WINDOWS_EMOJI_FALLBACK_FONT, ...EmojiFontFaces);
|
||||
|
||||
Fonts._initialized = true;
|
||||
|
||||
return fonts.registered;
|
||||
}
|
||||
|
||||
private static getFontFamilies(
|
||||
/**
|
||||
* Gets all the font families for the given scene.
|
||||
*/
|
||||
public getSceneFamilies = () => {
|
||||
return Fonts.getElementsFamilies(this.scene.getNonDeletedElements());
|
||||
};
|
||||
|
||||
private static getAllFamilies() {
|
||||
return Array.from(Fonts.registered.keys());
|
||||
}
|
||||
|
||||
private static getElementsFamilies(
|
||||
elements: ReadonlyArray<ExcalidrawElement>,
|
||||
): Array<ExcalidrawTextElement["fontFamily"]> {
|
||||
return Array.from(
|
||||
|
@ -296,30 +278,34 @@ export class Fonts {
|
|||
*
|
||||
* @param family font family
|
||||
* @param metadata font metadata
|
||||
* @param params array of the rest of the FontFace parameters [uri: string, descriptors: FontFaceDescriptors?] ,
|
||||
* @param fontFacesDecriptors font faces descriptors
|
||||
*/
|
||||
function register(
|
||||
this:
|
||||
| Fonts
|
||||
| {
|
||||
registered: Map<
|
||||
ValueOf<typeof FONT_FAMILY>,
|
||||
{ metadata: FontMetadata; fonts: Font[] }
|
||||
number,
|
||||
{ metadata: FontMetadata; fontFaces: IExcalidrawFontFace[] }
|
||||
>;
|
||||
},
|
||||
family: string,
|
||||
metadata: FontMetadata,
|
||||
...params: Array<{ uri: string; descriptors?: FontFaceDescriptors }>
|
||||
...fontFacesDecriptors: ExcalidrawFontFaceDescriptor[]
|
||||
) {
|
||||
// TODO: likely we will need to abandon number "id" in order to support custom fonts
|
||||
const familyId = FONT_FAMILY[family as keyof typeof FONT_FAMILY];
|
||||
const registeredFamily = this.registered.get(familyId);
|
||||
// TODO: likely we will need to abandon number value in order to support custom fonts
|
||||
const fontFamily =
|
||||
FONT_FAMILY[family as keyof typeof FONT_FAMILY] ??
|
||||
FONT_FAMILY_FALLBACKS[family as keyof typeof FONT_FAMILY_FALLBACKS];
|
||||
|
||||
const registeredFamily = this.registered.get(fontFamily);
|
||||
|
||||
if (!registeredFamily) {
|
||||
this.registered.set(familyId, {
|
||||
this.registered.set(fontFamily, {
|
||||
metadata,
|
||||
fonts: params.map(
|
||||
({ uri, descriptors }) => new ExcalidrawFont(family, uri, descriptors),
|
||||
fontFaces: fontFacesDecriptors.map(
|
||||
({ uri, descriptors }) =>
|
||||
new ExcalidrawFontFace(family, uri, descriptors),
|
||||
),
|
||||
});
|
||||
}
|
||||
|
@ -357,3 +343,8 @@ export const getLineHeight = (fontFamily: FontFamilyValues) => {
|
|||
|
||||
return lineHeight as ExcalidrawTextElement["lineHeight"];
|
||||
};
|
||||
|
||||
export interface ExcalidrawFontFaceDescriptor {
|
||||
uri: string;
|
||||
descriptors?: FontFaceDescriptors;
|
||||
}
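A hypothetical custom family illustrating the `ExcalidrawFontFaceDescriptor` shape consumed by `register`/`init` above ("MyHandFont" and its URLs are made up); the bundled equivalents live in the woff2/*/index.ts modules further down in this diff:

import { type ExcalidrawFontFaceDescriptor } from "../.."; // same import path the woff2/*/index.ts modules use

// hypothetical font family, for illustration only
export const MyHandFontFaces: ExcalidrawFontFaceDescriptor[] = [
  { uri: "https://example.com/fonts/MyHandFont-Regular.woff2" },
  {
    uri: "https://example.com/fonts/MyHandFont-Latin.woff2",
    descriptors: { unicodeRange: "U+0000-00FF" },
  },
];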
|
||||
|
|
|
@ -4,7 +4,7 @@ import {
|
|||
FontFamilyNormalIcon,
|
||||
FreedrawIcon,
|
||||
} from "../components/icons";
|
||||
import { FONT_FAMILY } from "../constants";
|
||||
import { FONT_FAMILY, FONT_FAMILY_FALLBACKS } from "../constants";
|
||||
|
||||
/**
|
||||
* Encapsulates font metrics with additional font metadata.
|
||||
|
@ -22,13 +22,15 @@ export interface FontMetadata {
|
|||
lineHeight: number;
|
||||
};
|
||||
/** element to be displayed as an icon */
|
||||
icon: JSX.Element;
|
||||
icon?: JSX.Element;
|
||||
/** flag to indicate a deprecated font */
|
||||
deprecated?: true;
|
||||
/** flag to indicate a server-side only font */
|
||||
serverSide?: true;
|
||||
/** flag to indicate a local-only font */
|
||||
local?: true;
|
||||
/** flag to indicate a fallback font */
|
||||
fallback?: true;
|
||||
}
|
||||
|
||||
export const FONT_METADATA: Record<number, FontMetadata> = {
|
||||
|
@ -106,13 +108,32 @@ export const FONT_METADATA: Record<number, FontMetadata> = {
|
|||
descender: -434,
|
||||
lineHeight: 1.15,
|
||||
},
|
||||
icon: FontFamilyNormalIcon,
|
||||
serverSide: true,
|
||||
},
|
||||
[FONT_FAMILY_FALLBACKS.Xiaolai]: {
|
||||
metrics: {
|
||||
unitsPerEm: 1000,
|
||||
ascender: 880,
|
||||
descender: -144,
|
||||
lineHeight: 1.15,
|
||||
},
|
||||
fallback: true,
|
||||
},
|
||||
[FONT_FAMILY_FALLBACKS["Segoe UI Emoji"]]: {
|
||||
metrics: {
|
||||
// reusing Excalifont metrics
|
||||
unitsPerEm: 1000,
|
||||
ascender: 886,
|
||||
descender: -374,
|
||||
lineHeight: 1.25,
|
||||
},
|
||||
local: true,
|
||||
fallback: true,
|
||||
},
|
||||
};
|
||||
|
||||
/** Unicode ranges */
|
||||
export const RANGES = {
|
||||
/** Unicode ranges defined by google fonts */
|
||||
export const GOOGLE_FONTS_RANGES = {
|
||||
LATIN:
|
||||
"U+0000-00FF, U+0131, U+0152-0153, U+02BB-02BC, U+02C6, U+02DA, U+02DC, U+0304, U+0308, U+0329, U+2000-206F, U+2074, U+20AC, U+2122, U+2191, U+2193, U+2212, U+2215, U+FEFF, U+FFFD",
|
||||
LATIN_EXT:
|
||||
|
|
packages/excalidraw/fonts/subset/subset-main.ts (new file, 131 lines)
|
@ -0,0 +1,131 @@
|
|||
import {
|
||||
WorkerInTheMainChunkError,
|
||||
WorkerUrlNotDefinedError,
|
||||
} from "../../errors";
|
||||
import { isServerEnv, promiseTry } from "../../utils";
|
||||
import { WorkerPool } from "../../workers";
|
||||
import type { Commands } from "./subset-shared.chunk";
|
||||
|
||||
let shouldUseWorkers = typeof Worker !== "undefined";
|
||||
|
||||
/**
|
||||
* Tries to subset glyphs in a font based on the used codepoints, returning the font as dataurl.
|
||||
* Under the hood utilizes worker threads (Web Workers, if available), otherwise fallbacks to the main thread.
|
||||
*
|
||||
* Check the following diagram for details: link.excalidraw.com/readonly/MbbnWPSWXgadXdtmzgeO
|
||||
*
|
||||
* @param arrayBuffer font data buffer in the woff2 format
|
||||
* @param codePoints codepoints used to subset the glyphs
|
||||
*
|
||||
* @returns font with subsetted glyphs (all glyphs in case of errors) converted into a dataurl
|
||||
*/
|
||||
export const subsetWoff2GlyphsByCodepoints = async (
|
||||
arrayBuffer: ArrayBuffer,
|
||||
codePoints: Array<number>,
|
||||
): Promise<string> => {
|
||||
const { Commands, subsetToBase64, toBase64 } =
|
||||
await lazyLoadSharedSubsetChunk();
|
||||
|
||||
if (!shouldUseWorkers) {
|
||||
return subsetToBase64(arrayBuffer, codePoints);
|
||||
}
|
||||
|
||||
return promiseTry(async () => {
|
||||
try {
|
||||
const workerPool = await getOrCreateWorkerPool();
|
||||
// copy the buffer to avoid working on top of the detached array buffer in the fallback
|
||||
// i.e. in case the worker throws, the array buffer does not get automatically detached, even if the worker is terminated
|
||||
const arrayBufferCopy = arrayBuffer.slice(0);
|
||||
const result = await workerPool.postMessage(
|
||||
{
|
||||
command: Commands.Subset,
|
||||
arrayBuffer: arrayBufferCopy,
|
||||
codePoints,
|
||||
} as const,
|
||||
{ transfer: [arrayBufferCopy] },
|
||||
);
|
||||
|
||||
// encode on the main thread to avoid copying large binary strings (as dataurl) between threads
|
||||
return toBase64(result);
|
||||
} catch (e) {
|
||||
// don't use workers if they are failing
|
||||
shouldUseWorkers = false;
|
||||
|
||||
if (
|
||||
// don't log the expected errors server-side
|
||||
!(
|
||||
isServerEnv() &&
|
||||
(e instanceof WorkerUrlNotDefinedError ||
|
||||
e instanceof WorkerInTheMainChunkError)
|
||||
)
|
||||
) {
|
||||
// eslint-disable-next-line no-console
|
||||
console.error(
|
||||
"Failed to use workers for subsetting, falling back to the main thread.",
|
||||
e,
|
||||
);
|
||||
}
|
||||
|
||||
// fallback to the main thread
|
||||
return subsetToBase64(arrayBuffer, codePoints);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
// lazy-loaded and cached chunks
|
||||
let subsetWorker: Promise<typeof import("./subset-worker.chunk")> | null = null;
|
||||
let subsetShared: Promise<typeof import("./subset-shared.chunk")> | null = null;
|
||||
|
||||
const lazyLoadWorkerSubsetChunk = async () => {
|
||||
if (!subsetWorker) {
|
||||
subsetWorker = import("./subset-worker.chunk");
|
||||
}
|
||||
|
||||
return subsetWorker;
|
||||
};
|
||||
|
||||
const lazyLoadSharedSubsetChunk = async () => {
|
||||
if (!subsetShared) {
|
||||
// load dynamically to force create a shared chunk reused between main thread and the worker thread
|
||||
subsetShared = import("./subset-shared.chunk");
|
||||
}
|
||||
|
||||
return subsetShared;
|
||||
};
|
||||
|
||||
// could be extended with multiple commands in the future
|
||||
type SubsetWorkerData = {
|
||||
command: typeof Commands.Subset;
|
||||
arrayBuffer: ArrayBuffer;
|
||||
codePoints: Array<number>;
|
||||
};
|
||||
|
||||
type SubsetWorkerResult<T extends SubsetWorkerData["command"]> =
|
||||
T extends typeof Commands.Subset ? ArrayBuffer : never;
|
||||
|
||||
let workerPool: Promise<
|
||||
WorkerPool<SubsetWorkerData, SubsetWorkerResult<SubsetWorkerData["command"]>>
|
||||
> | null = null;
|
||||
|
||||
/**
|
||||
* Lazy initialize or get the worker pool singleton.
|
||||
*
|
||||
* @throws implicitly if anything goes wrong - worker pool creation, loading wasm, initializing worker, etc.
|
||||
*/
|
||||
const getOrCreateWorkerPool = () => {
|
||||
if (!workerPool) {
|
||||
// immediate concurrent-friendly return, to ensure we have only one pool instance
|
||||
workerPool = promiseTry(async () => {
|
||||
const { WorkerUrl } = await lazyLoadWorkerSubsetChunk();
|
||||
|
||||
const pool = WorkerPool.create<
|
||||
SubsetWorkerData,
|
||||
SubsetWorkerResult<SubsetWorkerData["command"]>
|
||||
>(WorkerUrl);
|
||||
|
||||
return pool;
|
||||
});
|
||||
}
|
||||
|
||||
return workerPool;
|
||||
};
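A hedged usage sketch of `subsetWoff2GlyphsByCodepoints` above; the font URL is made up and the codepoints are derived from the text to be rendered:

import { subsetWoff2GlyphsByCodepoints } from "./subset-main";

const subsetForText = async (text: string) => {
  // assumed source of the woff2 binary, for illustration only
  const response = await fetch("https://example.com/Excalifont-Regular.woff2");
  const fontBuffer = await response.arrayBuffer();

  // unique codepoints actually used by the text
  const codePoints = Array.from(
    new Set(Array.from(text).map((char) => char.codePointAt(0)!)),
  );

  // resolves to a "data:font/woff2;base64,..." string (subset if possible, whole font on errors)
  return subsetWoff2GlyphsByCodepoints(fontBuffer, codePoints);
};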
|
packages/excalidraw/fonts/subset/subset-shared.chunk.ts (new file, 81 lines)
|
@ -0,0 +1,81 @@
|
|||
/**
|
||||
* DON'T depend on anything from the outside like `promiseTry`, as this module is part of a separate lazy-loaded chunk.
|
||||
*
|
||||
* Including anything from the main chunk would include the whole chunk by default.
|
||||
* Even if it would be tree-shaken during build, it won't be tree-shaken in dev.
|
||||
*
|
||||
* In the future consider separating common utils into a separate shared chunk.
|
||||
*/
|
||||
|
||||
import loadWoff2 from "../wasm/woff2-loader";
|
||||
import loadHbSubset from "../wasm/hb-subset-loader";
|
||||
|
||||
/**
|
||||
* Shared commands between the main thread and worker threads.
|
||||
*/
|
||||
export const Commands = {
|
||||
Subset: "SUBSET",
|
||||
} as const;
|
||||
|
||||
/**
|
||||
* Used by browser (main thread), node and jsdom, to subset the font based on the passed codepoints.
|
||||
*
|
||||
* @returns woff2 font as a base64 encoded string
|
||||
*/
|
||||
export const subsetToBase64 = async (
|
||||
arrayBuffer: ArrayBuffer,
|
||||
codePoints: Array<number>,
|
||||
): Promise<string> => {
|
||||
try {
|
||||
const buffer = await subsetToBinary(arrayBuffer, codePoints);
|
||||
return toBase64(buffer);
|
||||
} catch (e) {
|
||||
console.error("Skipped glyph subsetting", e);
|
||||
// Fallback to encoding whole font in case of errors
|
||||
return toBase64(arrayBuffer);
|
||||
}
|
||||
};
|
||||
|
||||
/**
|
||||
* Used by browser (worker thread) and as part of `subsetToBase64`, to subset the font based on the passed codepoints.
|
||||
*
|
||||
* @returns woff2 font as an ArrayBuffer, to avoid copying large strings between worker threads and the main thread.
|
||||
*/
|
||||
export const subsetToBinary = async (
|
||||
arrayBuffer: ArrayBuffer,
|
||||
codePoints: Array<number>,
|
||||
): Promise<ArrayBuffer> => {
|
||||
// lazy loaded wasm modules to avoid multiple initializations in case of concurrent triggers
|
||||
// IMPORTANT: could be expensive, as each new worker instance lazy loads these to their own memory ~ keep the # of workers small!
|
||||
const { compress, decompress } = await loadWoff2();
|
||||
const { subset } = await loadHbSubset();
|
||||
|
||||
const decompressedBinary = decompress(arrayBuffer).buffer;
|
||||
const snftSubset = subset(decompressedBinary, new Set(codePoints));
|
||||
const compressedBinary = compress(snftSubset.buffer);
|
||||
|
||||
return compressedBinary.buffer;
|
||||
};
|
||||
|
||||
/**
|
||||
* Util for isomorphic browser (main thread), node and jsdom usage.
|
||||
*
|
||||
* Isn't used inside the worker to avoid copying large binary strings (as dataurl) between worker threads and the main thread.
|
||||
*/
|
||||
export const toBase64 = async (arrayBuffer: ArrayBuffer) => {
|
||||
let base64: string;
|
||||
|
||||
if (typeof Buffer !== "undefined") {
|
||||
// node, jsdom
|
||||
base64 = Buffer.from(arrayBuffer).toString("base64");
|
||||
} else {
|
||||
// browser (main thread)
|
||||
// it's perfectly fine to treat each byte independently,
|
||||
// as we care only about turning individual bytes into codepoints,
|
||||
// not about multi-byte unicode characters
|
||||
const byteString = String.fromCharCode(...new Uint8Array(arrayBuffer));
|
||||
base64 = btoa(byteString);
|
||||
}
|
||||
|
||||
return `data:font/woff2;base64,${base64}`;
|
||||
};
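A quick check of the browser branch of `toBase64` above (node's `Buffer` branch produces the same result for the same bytes):

const bytes = new Uint8Array([72, 101, 108, 108, 111]); // "Hello"
const byteString = String.fromCharCode(...bytes); // each byte treated independently, as noted above
console.log(`data:font/woff2;base64,${btoa(byteString)}`); // data:font/woff2;base64,SGVsbG8=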
|
packages/excalidraw/fonts/subset/subset-worker.chunk.ts (new file, 42 lines)
|
@ -0,0 +1,42 @@
|
|||
/**
|
||||
* DON'T depend on anything from the outside like `promiseTry`, as this module is part of a separate lazy-loaded chunk.
|
||||
*
|
||||
* Including anything from the main chunk would include the whole chunk by default.
|
||||
* Even if it would be tree-shaken during build, it won't be tree-shaken in dev.
|
||||
*
|
||||
* In the future consider separating common utils into a separate shared chunk.
|
||||
*/
|
||||
|
||||
import { Commands, subsetToBinary } from "./subset-shared.chunk";
|
||||
|
||||
/**
|
||||
* Due to this export (and related dynamic import), this worker code will be included in the bundle automatically (as a separate chunk),
|
||||
* without the need for esbuild / vite / rollup plugins and special browser / server treatment.
|
||||
*
|
||||
* `import.meta.url` is undefined in nodejs
|
||||
*/
|
||||
export const WorkerUrl: URL | undefined = import.meta.url
|
||||
? new URL(import.meta.url)
|
||||
: undefined;
|
||||
|
||||
// run only in the worker context
|
||||
if (typeof window === "undefined" && typeof self !== "undefined") {
|
||||
self.onmessage = async (e: {
|
||||
data: {
|
||||
command: typeof Commands.Subset;
|
||||
arrayBuffer: ArrayBuffer;
|
||||
codePoints: Array<number>;
|
||||
};
|
||||
}) => {
|
||||
switch (e.data.command) {
|
||||
case Commands.Subset:
|
||||
const buffer = await subsetToBinary(
|
||||
e.data.arrayBuffer,
|
||||
e.data.codePoints,
|
||||
);
|
||||
|
||||
self.postMessage(buffer, { transfer: [buffer] });
|
||||
break;
|
||||
}
|
||||
};
|
||||
}
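A minimal sketch of what the worker protocol above boils down to, using a raw module Worker directly; the real code goes through the `WorkerPool` abstraction from `../../workers` instead:

import { Commands } from "./subset-shared.chunk";
import { WorkerUrl } from "./subset-worker.chunk";

const subsetInWorker = (arrayBuffer: ArrayBuffer, codePoints: number[]) =>
  new Promise<ArrayBuffer>((resolve, reject) => {
    if (!WorkerUrl) {
      reject(new Error("Worker URL is not defined!"));
      return;
    }
    const worker = new Worker(WorkerUrl, { type: "module" });
    worker.onmessage = (event: MessageEvent<ArrayBuffer>) => {
      resolve(event.data);
      worker.terminate();
    };
    worker.onerror = reject;
    // the buffer is transferred (detached on this side) to avoid copying it into the worker
    worker.postMessage({ command: Commands.Subset, arrayBuffer, codePoints }, [
      arrayBuffer,
    ]);
  });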
|
packages/excalidraw/fonts/wasm/hb-subset-loader.ts (new file, 57 lines)
|
@ -0,0 +1,57 @@
|
|||
/**
|
||||
* DON'T depend on anything from the outside like `promiseTry`, as this module is part of a separate lazy-loaded chunk.
|
||||
*
|
||||
* Including anything from the main chunk would include the whole chunk by default.
|
||||
* Even if it would be tree-shaken during build, it won't be tree-shaken in dev.
|
||||
*
|
||||
* In the future consider separating common utils into a separate shared chunk.
|
||||
*/
|
||||
|
||||
import binary from "./hb-subset-wasm";
|
||||
import bindings from "./hb-subset-bindings";
|
||||
|
||||
/**
|
||||
* Lazy loads wasm and respective bindings for font subsetting based on the harfbuzzjs.
|
||||
*/
|
||||
let loadedWasm: ReturnType<typeof load> | null = null;
|
||||
|
||||
// TODO: consider adding support for fetching the wasm from an URL (external CDN, data URL, etc.)
|
||||
const load = (): Promise<{
|
||||
subset: (
|
||||
fontBuffer: ArrayBuffer,
|
||||
codePoints: ReadonlySet<number>,
|
||||
) => Uint8Array;
|
||||
}> => {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
try {
|
||||
const module = await WebAssembly.instantiate(binary);
|
||||
const harfbuzzJsWasm = module.instance.exports;
|
||||
// @ts-expect-error since `.buffer` is custom prop
|
||||
const heapu8 = new Uint8Array(harfbuzzJsWasm.memory.buffer);
|
||||
|
||||
const hbSubset = {
|
||||
subset: (fontBuffer: ArrayBuffer, codePoints: ReadonlySet<number>) => {
|
||||
return bindings.subset(
|
||||
harfbuzzJsWasm,
|
||||
heapu8,
|
||||
fontBuffer,
|
||||
codePoints,
|
||||
);
|
||||
},
|
||||
};
|
||||
|
||||
resolve(hbSubset);
|
||||
} catch (e) {
|
||||
reject(e);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
// lazy load the default export
|
||||
export default (): ReturnType<typeof load> => {
|
||||
if (!loadedWasm) {
|
||||
loadedWasm = load();
|
||||
}
|
||||
|
||||
return loadedWasm;
|
||||
};
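A hedged usage sketch of the lazy loader above; the input is assumed to be already decompressed (sfnt) font data, as in `subsetToBinary`:

import loadHbSubset from "./hb-subset-loader";

const subsetTwoGlyphs = async (decompressedFont: ArrayBuffer) => {
  const { subset } = await loadHbSubset(); // wasm is instantiated once and cached
  const sfnt: Uint8Array = subset(decompressedFont, new Set([0x48, 0x69])); // "H", "i"
  return sfnt;
};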
|
|
@ -1,58 +0,0 @@
|
|||
/**
|
||||
* Lazy loads wasm and respective bindings for font subsetting based on the harfbuzzjs.
|
||||
*/
|
||||
let loadedWasm: ReturnType<typeof load> | null = null;
|
||||
|
||||
// TODO: add support for fetching the wasm from an URL (external CDN, data URL, etc.)
|
||||
const load = (): Promise<{
|
||||
subset: (
|
||||
fontBuffer: ArrayBuffer,
|
||||
codePoints: ReadonlySet<number>,
|
||||
) => Uint8Array;
|
||||
}> => {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
try {
|
||||
const [binary, bindings] = await Promise.all([
|
||||
import("./hb-subset.wasm"),
|
||||
import("./hb-subset.bindings"),
|
||||
]);
|
||||
|
||||
WebAssembly.instantiate(binary.default).then((module) => {
|
||||
try {
|
||||
const harfbuzzJsWasm = module.instance.exports;
|
||||
// @ts-expect-error since `.buffer` is custom prop
|
||||
const heapu8 = new Uint8Array(harfbuzzJsWasm.memory.buffer);
|
||||
|
||||
const hbSubset = {
|
||||
subset: (
|
||||
fontBuffer: ArrayBuffer,
|
||||
codePoints: ReadonlySet<number>,
|
||||
) => {
|
||||
return bindings.default.subset(
|
||||
harfbuzzJsWasm,
|
||||
heapu8,
|
||||
fontBuffer,
|
||||
codePoints,
|
||||
);
|
||||
},
|
||||
};
|
||||
|
||||
resolve(hbSubset);
|
||||
} catch (e) {
|
||||
reject(e);
|
||||
}
|
||||
});
|
||||
} catch (error) {
|
||||
reject(error);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
// lazy load the default export
|
||||
export default (): ReturnType<typeof load> => {
|
||||
if (!loadedWasm) {
|
||||
loadedWasm = load();
|
||||
}
|
||||
|
||||
return loadedWasm;
|
||||
};
|
|
@ -47,6 +47,7 @@ const Module = (function () {
|
|||
moduleOverrides[key] = Module[key];
|
||||
}
|
||||
}
|
||||
|
||||
let arguments_ = [];
|
||||
let thisProgram = "./this.program";
|
||||
let quit_ = function (status, toThrow) {
|
||||
|
@ -4046,3 +4047,5 @@ const Module = (function () {
|
|||
})();
|
||||
|
||||
export default Module;
|
||||
|
||||
|
packages/excalidraw/fonts/wasm/woff2-loader.ts (new file, 76 lines)
|
@ -0,0 +1,76 @@
|
|||
/**
|
||||
* DON'T depend on anything from the outside like `promiseTry`, as this module is part of a separate lazy-loaded chunk.
|
||||
*
|
||||
* Including anything from the main chunk would include the whole chunk by default.
|
||||
* Even if it would be tree-shaken during build, it won't be tree-shaken in dev.
|
||||
*
|
||||
* In the future consider separating common utils into a separate shared chunk.
|
||||
*/
|
||||
|
||||
import binary from "./woff2-wasm";
|
||||
import bindings from "./woff2-bindings";
|
||||
|
||||
/**
|
||||
* Lazy loads wasm and respective bindings for woff2 compression and decompression.
|
||||
*/
|
||||
type Vector = any;
|
||||
|
||||
let loadedWasm: ReturnType<typeof load> | null = null;
|
||||
|
||||
// re-map from internal vector into byte array
|
||||
function convertFromVecToUint8Array(vector: Vector): Uint8Array {
|
||||
const arr = [];
|
||||
for (let i = 0, l = vector.size(); i < l; i++) {
|
||||
arr.push(vector.get(i));
|
||||
}
|
||||
|
||||
return new Uint8Array(arr);
|
||||
}
|
||||
|
||||
// TODO: consider adding support for fetching the wasm from an URL (external CDN, data URL, etc.)
|
||||
const load = (): Promise<{
|
||||
compress: (buffer: ArrayBuffer) => Uint8Array;
|
||||
decompress: (buffer: ArrayBuffer) => Uint8Array;
|
||||
}> => {
|
||||
return new Promise((resolve, reject) => {
|
||||
try {
|
||||
// initializing the module manually, so that we could pass in the wasm binary
|
||||
// note that `bindings.then` is not promise/A+ compliant, hence the need for another explicit try/catch
|
||||
bindings({ wasmBinary: binary }).then(
|
||||
(module: {
|
||||
woff2Enc: (buffer: ArrayBuffer, byteLength: number) => Vector;
|
||||
woff2Dec: (buffer: ArrayBuffer, byteLength: number) => Vector;
|
||||
}) => {
|
||||
try {
|
||||
// re-exporting only compress and decompress functions (also avoids infinite loop inside emscripten bindings)
|
||||
const woff2 = {
|
||||
compress: (buffer: ArrayBuffer) =>
|
||||
convertFromVecToUint8Array(
|
||||
module.woff2Enc(buffer, buffer.byteLength),
|
||||
),
|
||||
decompress: (buffer: ArrayBuffer) =>
|
||||
convertFromVecToUint8Array(
|
||||
module.woff2Dec(buffer, buffer.byteLength),
|
||||
),
|
||||
};
|
||||
|
||||
resolve(woff2);
|
||||
} catch (e) {
|
||||
reject(e);
|
||||
}
|
||||
},
|
||||
);
|
||||
} catch (e) {
|
||||
reject(e);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
// lazy loaded default export
|
||||
export default (): ReturnType<typeof load> => {
|
||||
if (!loadedWasm) {
|
||||
loadedWasm = load();
|
||||
}
|
||||
|
||||
return loadedWasm;
|
||||
};
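A hedged round-trip sketch of the loader above (woff2 → sfnt → woff2), mirroring how `subsetToBinary` wraps the subsetting step between these two calls:

import loadWoff2 from "./woff2-loader";

const roundTrip = async (woff2Buffer: ArrayBuffer) => {
  const { compress, decompress } = await loadWoff2();
  const sfnt = decompress(woff2Buffer); // raw font table data as a Uint8Array
  const woff2Again = compress(sfnt.buffer); // back to woff2 bytes
  return woff2Again;
};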
|
|
@ -1,70 +0,0 @@
|
|||
/**
|
||||
* Lazy loads wasm and respective bindings for woff2 compression and decompression.
|
||||
*/
|
||||
type Vector = any;
|
||||
|
||||
let loadedWasm: ReturnType<typeof load> | null = null;
|
||||
|
||||
// TODO: add support for fetching the wasm from an URL (external CDN, data URL, etc.)
|
||||
const load = (): Promise<{
|
||||
compress: (buffer: ArrayBuffer) => Uint8Array;
|
||||
decompress: (buffer: ArrayBuffer) => Uint8Array;
|
||||
}> => {
|
||||
return new Promise(async (resolve, reject) => {
|
||||
try {
|
||||
const [binary, bindings] = await Promise.all([
|
||||
import("./woff2.wasm"),
|
||||
import("./woff2.bindings"),
|
||||
]);
|
||||
|
||||
// initializing the module manually, so that we could pass in the wasm binary
|
||||
bindings
|
||||
.default({ wasmBinary: binary.default })
|
||||
.then(
|
||||
(module: {
|
||||
woff2Enc: (buffer: ArrayBuffer, byteLength: number) => Vector;
|
||||
woff2Dec: (buffer: ArrayBuffer, byteLength: number) => Vector;
|
||||
}) => {
|
||||
try {
|
||||
// re-map from internal vector into byte array
|
||||
function convertFromVecToUint8Array(vector: Vector): Uint8Array {
|
||||
const arr = [];
|
||||
for (let i = 0, l = vector.size(); i < l; i++) {
|
||||
arr.push(vector.get(i));
|
||||
}
|
||||
|
||||
return new Uint8Array(arr);
|
||||
}
|
||||
|
||||
// re-exporting only compress and decompress functions (also avoids infinite loop inside emscripten bindings)
|
||||
const woff2 = {
|
||||
compress: (buffer: ArrayBuffer) =>
|
||||
convertFromVecToUint8Array(
|
||||
module.woff2Enc(buffer, buffer.byteLength),
|
||||
),
|
||||
decompress: (buffer: ArrayBuffer) =>
|
||||
convertFromVecToUint8Array(
|
||||
module.woff2Dec(buffer, buffer.byteLength),
|
||||
),
|
||||
};
|
||||
|
||||
resolve(woff2);
|
||||
} catch (e) {
|
||||
reject(e);
|
||||
}
|
||||
},
|
||||
);
|
||||
} catch (e) {
|
||||
reject(e);
|
||||
}
|
||||
});
|
||||
};
|
||||
|
||||
// lazy loaded default export
|
||||
export default (): ReturnType<typeof load> => {
|
||||
if (!loadedWasm) {
|
||||
loadedWasm = load();
|
||||
}
|
||||
|
||||
return loadedWasm;
|
||||
};
|
packages/excalidraw/fonts/woff2/Cascadia/index.ts (new file, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
import CascadiaCodeRegular from "./CascadiaCode-Regular.woff2";
|
||||
import { type ExcalidrawFontFaceDescriptor } from "../..";
|
||||
|
||||
export const CascadiaFontFaces: ExcalidrawFontFaceDescriptor[] = [
|
||||
{
|
||||
uri: CascadiaCodeRegular,
|
||||
},
|
||||
];
|
packages/excalidraw/fonts/woff2/Comic/index.ts (new file, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
import ComicShannsRegular from "./ComicShanns-Regular.woff2";
|
||||
import { type ExcalidrawFontFaceDescriptor } from "../..";
|
||||
|
||||
export const ComicFontFaces: ExcalidrawFontFaceDescriptor[] = [
|
||||
{
|
||||
uri: ComicShannsRegular,
|
||||
},
|
||||
];
|
packages/excalidraw/fonts/woff2/Emoji/index.ts (new file, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
import { LOCAL_FONT_PROTOCOL } from "../../metadata";
|
||||
import { type ExcalidrawFontFaceDescriptor } from "../..";
|
||||
|
||||
export const EmojiFontFaces: ExcalidrawFontFaceDescriptor[] = [
|
||||
{
|
||||
uri: LOCAL_FONT_PROTOCOL,
|
||||
},
|
||||
];
|
packages/excalidraw/fonts/woff2/Excalifont/index.ts (new file, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
import Excalifont from "./Excalifont-Regular.woff2";
|
||||
import { type ExcalidrawFontFaceDescriptor } from "../..";
|
||||
|
||||
export const ExcalifontFontFaces: ExcalidrawFontFaceDescriptor[] = [
|
||||
{
|
||||
uri: Excalifont,
|
||||
},
|
||||
];
|
packages/excalidraw/fonts/woff2/Helvetica/index.ts (new file, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
import { LOCAL_FONT_PROTOCOL } from "../../metadata";
|
||||
import { type ExcalidrawFontFaceDescriptor } from "../..";
|
||||
|
||||
export const HelveticaFontFaces: ExcalidrawFontFaceDescriptor[] = [
|
||||
{
|
||||
uri: LOCAL_FONT_PROTOCOL,
|
||||
},
|
||||
];
|
packages/excalidraw/fonts/woff2/Liberation/index.ts (new file, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
import LiberationSansRegular from "./LiberationSans-Regular.woff2";
|
||||
import { type ExcalidrawFontFaceDescriptor } from "../..";
|
||||
|
||||
export const LiberationFontFaces: ExcalidrawFontFaceDescriptor[] = [
|
||||
{
|
||||
uri: LiberationSansRegular,
|
||||
},
|
||||
];
|
packages/excalidraw/fonts/woff2/Lilita/index.ts (new file, 16 lines)
|
@ -0,0 +1,16 @@
|
|||
import LilitaLatin from "./Lilita-Regular-i7dPIFZ9Zz-WBtRtedDbYEF8RXi4EwQ.woff2";
|
||||
import LilitaLatinExt from "./Lilita-Regular-i7dPIFZ9Zz-WBtRtedDbYE98RXi4EwSsbg.woff2";
|
||||
|
||||
import { GOOGLE_FONTS_RANGES } from "../../metadata";
|
||||
import { type ExcalidrawFontFaceDescriptor } from "../..";
|
||||
|
||||
export const LilitaFontFaces: ExcalidrawFontFaceDescriptor[] = [
|
||||
{
|
||||
uri: LilitaLatinExt,
|
||||
descriptors: { unicodeRange: GOOGLE_FONTS_RANGES.LATIN_EXT },
|
||||
},
|
||||
{
|
||||
uri: LilitaLatin,
|
||||
descriptors: { unicodeRange: GOOGLE_FONTS_RANGES.LATIN },
|
||||
},
|
||||
];
|
packages/excalidraw/fonts/woff2/Nunito/index.ts (new file, 37 lines)
|
@ -0,0 +1,37 @@
|
|||
import Latin from "./Nunito-Regular-XRXI3I6Li01BKofiOc5wtlZ2di8HDIkhdTQ3j6zbXWjgeg.woff2";
|
||||
import LatinExt from "./Nunito-Regular-XRXI3I6Li01BKofiOc5wtlZ2di8HDIkhdTo3j6zbXWjgevT5.woff2";
|
||||
import Cyrilic from "./Nunito-Regular-XRXI3I6Li01BKofiOc5wtlZ2di8HDIkhdTA3j6zbXWjgevT5.woff2";
|
||||
import CyrilicExt from "./Nunito-Regular-XRXI3I6Li01BKofiOc5wtlZ2di8HDIkhdTk3j6zbXWjgevT5.woff2";
|
||||
import Vietnamese from "./Nunito-Regular-XRXI3I6Li01BKofiOc5wtlZ2di8HDIkhdTs3j6zbXWjgevT5.woff2";
|
||||
|
||||
import { GOOGLE_FONTS_RANGES } from "../../metadata";
|
||||
import { type ExcalidrawFontFaceDescriptor } from "../..";
|
||||
|
||||
export const NunitoFontFaces: ExcalidrawFontFaceDescriptor[] = [
|
||||
{
|
||||
uri: CyrilicExt,
|
||||
descriptors: {
|
||||
unicodeRange: GOOGLE_FONTS_RANGES.CYRILIC_EXT,
|
||||
weight: "500",
|
||||
},
|
||||
},
|
||||
{
|
||||
uri: Cyrilic,
|
||||
descriptors: { unicodeRange: GOOGLE_FONTS_RANGES.CYRILIC, weight: "500" },
|
||||
},
|
||||
{
|
||||
uri: Vietnamese,
|
||||
descriptors: {
|
||||
unicodeRange: GOOGLE_FONTS_RANGES.VIETNAMESE,
|
||||
weight: "500",
|
||||
},
|
||||
},
|
||||
{
|
||||
uri: LatinExt,
|
||||
descriptors: { unicodeRange: GOOGLE_FONTS_RANGES.LATIN_EXT, weight: "500" },
|
||||
},
|
||||
{
|
||||
uri: Latin,
|
||||
descriptors: { unicodeRange: GOOGLE_FONTS_RANGES.LATIN, weight: "500" },
|
||||
},
|
||||
];
|
packages/excalidraw/fonts/woff2/Virgil/index.ts (new file, 8 lines)
|
@ -0,0 +1,8 @@
|
|||
import Virgil from "./Virgil-Regular.woff2";
|
||||
import { type ExcalidrawFontFaceDescriptor } from "../..";
|
||||
|
||||
export const VirgilFontFaces: ExcalidrawFontFaceDescriptor[] = [
|
||||
{
|
||||
uri: Virgil,
|
||||
},
|
||||
];
|
Binary files (woff2 font assets) not shown.
Some files were not shown because too many files have changed in this diff.