refactor: add url token and fix Telegram adaptation problem

moonrailgun 2024-01-11 00:34:16 +08:00
parent 5b826cab34
commit e6f02677e5
7 changed files with 59 additions and 16 deletions

View File

@@ -22,7 +22,7 @@ export const telegram: NotificationProvider = {
     await axios.post(`https://api.telegram.org/bot${botToken}/sendMessage`, {
       chat_id: chatId,
       text,
-      parse_mode: 'MarkdownV2',
+      parse_mode: 'HTML',
     });
 
     // send image

View File

@@ -9,6 +9,7 @@ import {
   ParagraphContentToken,
   TextContentToken,
   TitleContentToken,
+  UrlContentToken,
 } from './type';
 
 export type { ContentToken };
@@ -34,6 +35,11 @@ export const token = {
   newline: (): NewlineContentToken => ({
     type: 'newline',
   }),
+  url: (url: string, title?: string): UrlContentToken => ({
+    type: 'url',
+    url,
+    title,
+  }),
 };
 
 export const baseContentTokenizer = new BaseContentTokenizer();
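
A minimal usage sketch of the new url token together with the exported baseContentTokenizer; the import path and the exact string produced by parse() are assumptions (BaseContentTokenizer.parseUrl() falls back to the bare url, as the base tokenizer file below shows):

import { token, baseContentTokenizer } from './token'; // assumed import path

const tokens = [
  token.url('https://example.com/report', 'Daily report'), // UrlContentToken
  token.newline(),
];

// With the base tokenizer the url token renders as the raw url, so the result
// is expected to be "https://example.com/report\n" (assuming parse() simply
// concatenates the per-token strings).
const plainText = baseContentTokenizer.parse(tokens);
console.log(plainText);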

View File

@@ -5,6 +5,7 @@ import {
   ParagraphContentToken,
   TextContentToken,
   TitleContentToken,
+  UrlContentToken,
 } from '../type';
 
 export class BaseContentTokenizer {
@@ -28,6 +29,10 @@ export class BaseContentTokenizer {
     return '\n';
   }
 
+  parseUrl(token: UrlContentToken) {
+    return token.url;
+  }
+
   parse(tokens: ContentToken[]) {
     return tokens
       .map((token) => {

View File

@@ -3,6 +3,7 @@ import {
   NewlineContentToken,
   ParagraphContentToken,
   TitleContentToken,
+  UrlContentToken,
 } from '../type';
 import { BaseContentTokenizer } from './base';
@@ -32,4 +33,8 @@ export class HTMLContentTokenizer extends BaseContentTokenizer {
   parseNewline(token: NewlineContentToken) {
     return '<br />';
   }
+
+  parseUrl(token: UrlContentToken): string {
+    return `<a href="${token.url}">${token.title ?? token.url}</a>`;
+  }
 }

View File

@@ -2,6 +2,7 @@ import {
   ImageContentToken,
   ParagraphContentToken,
   TitleContentToken,
+  UrlContentToken,
 } from '../type';
 import { BaseContentTokenizer } from './base';
@@ -27,4 +28,8 @@ export class MarkdownContentTokenizer extends BaseContentTokenizer {
     return `\n${token.content}\n`;
   }
+
+  parseUrl(token: UrlContentToken): string {
+    return `[${token.title ?? ''}](${token.url})`;
+  }
 }
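
For comparison, a sketch of how one UrlContentToken renders through the HTML and Markdown tokenizers above; the './html' import path is an assumption, while '../type' and './markdown' appear elsewhere in this commit:

import { UrlContentToken } from '../type';
import { HTMLContentTokenizer } from './html'; // assumed file name
import { MarkdownContentTokenizer } from './markdown';

const urlToken: UrlContentToken = {
  type: 'url',
  url: 'https://example.com',
  title: 'Example',
};

// "<a href=\"https://example.com\">Example</a>"
console.log(new HTMLContentTokenizer().parseUrl(urlToken));

// "[Example](https://example.com)" (an untitled token would render as "[](https://example.com)")
console.log(new MarkdownContentTokenizer().parseUrl(urlToken));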

View File

@@ -1,22 +1,37 @@
-import { ImageContentToken, TitleContentToken } from '../type';
-import { MarkdownContentTokenizer } from './markdown';
+import {
+  ImageContentToken,
+  ParagraphContentToken,
+  TextContentToken,
+  TitleContentToken,
+  UrlContentToken,
+} from '../type';
+import { BaseContentTokenizer } from './base';
 
-export class TelegramContentTokenizer extends MarkdownContentTokenizer {
+export class TelegramContentTokenizer extends BaseContentTokenizer {
   parseImage(token: ImageContentToken) {
     return '';
   }
 
-  parseTitle(token: TitleContentToken) {
-    if (token.level === 1) {
-      return `\n\\# ${token.content}\n`;
-    }
-    if (token.level === 2) {
-      return `\n\\#\\# ${token.content}\n`;
-    }
-    if (token.level === 3) {
-      return `\n\\#\\#\\# ${token.content}\n`;
-    }
+  parseText(token: TextContentToken): string {
+    return this.parseEntityCharacter(token.content);
+  }
 
-    return `\n${token.content}\n`;
+  parseTitle(token: TitleContentToken) {
+    return `\n<b>${this.parseEntityCharacter(token.content)}</b>\n`;
   }
+
+  parseParagraph(token: ParagraphContentToken) {
+    return `\n${this.parseEntityCharacter(token.content)}\n`;
+  }
+
+  parseUrl(token: UrlContentToken): string {
+    return `<a href="${token.url}">${token.title ?? token.url}</a>`;
+  }
+
+  private parseEntityCharacter(input: string): string {
+    return input
+      .replaceAll('&', '&amp;')
+      .replaceAll('<', '&lt;')
+      .replaceAll('>', '&gt;');
+  }
 }
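
A sketch of how this tokenizer's output pairs with the parse_mode: 'HTML' change in the Telegram provider above; sendTelegramText is a hypothetical wrapper, and the title token literal assumes the level/content fields used elsewhere in this commit:

import axios from 'axios';
import { TelegramContentTokenizer } from './telegram'; // assumed file name

async function sendTelegramText(botToken: string, chatId: string) {
  const tokenizer = new TelegramContentTokenizer();

  // parseTitle()/parseText() HTML-escape their content, so "<daily>" is sent as
  // "&lt;daily&gt;" and the generated string stays valid for parse_mode: 'HTML'.
  const text = tokenizer.parse([
    { type: 'title', level: 1, content: 'Report <daily>' },
    { type: 'url', url: 'https://example.com', title: 'Details' },
  ]);

  // Mirrors the provider change above: Telegram now interprets <b> and <a> tags
  // instead of MarkdownV2 markup.
  await axios.post(`https://api.telegram.org/bot${botToken}/sendMessage`, {
    chat_id: chatId,
    text,
    parse_mode: 'HTML',
  });
}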

View File

@@ -23,9 +23,16 @@ export type NewlineContentToken = {
   type: 'newline';
 };
 
+export type UrlContentToken = {
+  type: 'url';
+  url: string;
+  title?: string;
+};
+
 export type ContentToken =
   | TextContentToken
   | ImageContentToken
   | TitleContentToken
   | ParagraphContentToken
-  | NewlineContentToken;
+  | NewlineContentToken
+  | UrlContentToken;
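
Because ContentToken is a discriminated union on the type field, consumers can narrow a token with a plain switch. A sketch under assumptions: renderUrlOrNewline is a hypothetical helper, the import paths are guesses, and parseNewline is assumed to be the base-class method whose '\n' body appears in an earlier hunk (the real dispatch lives inside BaseContentTokenizer.parse(), which this commit only shows in part):

import { ContentToken } from './type'; // assumed import path
import { BaseContentTokenizer } from './tokenizer/base'; // assumed import path

function renderUrlOrNewline(
  tokenizer: BaseContentTokenizer,
  token: ContentToken
): string {
  switch (token.type) {
    case 'url':
      return tokenizer.parseUrl(token); // narrowed to UrlContentToken
    case 'newline':
      return tokenizer.parseNewline(token); // narrowed to NewlineContentToken
    default:
      return ''; // text/image/title/paragraph omitted in this sketch
  }
}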