# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
import re
from functools import partial

from telethon import events
from telethon.extensions.markdown import DEFAULT_URL_RE
from telethon.tl.functions.messages import EditMessageRequest
from telethon.tl.types import MessageEntityTextUrl
from telethon.utils import add_surrogate, del_surrogate
def parse_url_match(m):
    """Convert a DEFAULT_URL_RE match into (display text, text-url entity).

    Group 1 is the text shown to the user; group 2 is the link target
    (surrogate pairs are undone so the entity carries a plain URL).
    """
    display = m.group(1)
    link = MessageEntityTextUrl(
        offset=m.start(),
        length=len(display),
        url=del_surrogate(m.group(2))
    )
    return display, link
def parse_aesthetics(m):
    """Turn !+text+! into fullwidth ("aesthetic") characters; no entity.

    Printable ASCII 0x21-0x7E maps to the fullwidth block starting at
    U+FF01 (same offset of 0xFF00 - 0x20); a regular space becomes an
    ideographic space (U+3000); everything else is left untouched.
    """
    table = {code: code + 0xFF00 - 0x20 for code in range(0x21, 0x7F)}
    table[ord(" ")] = 0x3000
    return m[1].translate(table), None
def parse_strikethrough(m):
    """Render ~~text~~ by suffixing every character with the combining
    long stroke overlay (U+0336); no entity is produced."""
    stroke = "\u0336"
    struck = "".join(ch + stroke for ch in m[1])
    # An empty group still yields a lone stroke, matching the original
    # join-then-append formulation.
    return (struck or stroke), None
def parse_subreddit(m):
    """Link r/subreddit (or /r/subreddit) mentions to reddit.com.

    Group 1 is the leading boundary character (kept as-is), group 2 is
    the whole mention, group 3 is the bare "r/name" part.  The mention
    is normalized to "/r/name" and wrapped in a text-url entity.
    """
    sub = '/' + m.group(3)
    link = MessageEntityTextUrl(
        offset=m.start(2),
        length=len(sub),
        url=f'reddit.com{sub}'
    )
    return m.group(1) + sub, link
def parse_snip(m):
    """Expand !name using the snip plugin's stored text snippets.

    Falls back to the literal match when the snip plugin isn't loaded,
    the name is unknown, or the snip isn't a text snip.
    NOTE(review): `borg` is assumed to be the framework-injected bot
    instance — it is not defined in this file.
    """
    name = m.group(1)[1:]
    try:
        snip_plugin = borg._plugins['snip']
        snip = snip_plugin.storage.snips[name]
        if snip['type'] == snip_plugin.TYPE_TEXT:
            return snip['text'], None
    except KeyError:
        pass
    return m.group(1), None
# A matcher is a (compiled pattern, parse function) pair, where the parse
# function takes the match and returns (replacement text, entity or None).
# Order matters: the first pattern that matches at a position wins.
MATCHERS = [
    (DEFAULT_URL_RE, parse_url_match),                        # [text](url)
    (re.compile(r'!\+(.+?)\+!'), parse_aesthetics),           # fullwidth text
    (re.compile(r'~~(.+?)~~'), parse_strikethrough),          # strikethrough
    (re.compile(r'([^/\w]|^)(/?(r/\w+))'), parse_subreddit),  # r/subreddit
    (re.compile(r'(!\w+)'), parse_snip),                      # snip expansion
]
def parse(message, old_entities=None):
    """Scan *message* with MATCHERS and apply every match.

    Regions already covered by *old_entities* are skipped, so existing
    formatting is never re-parsed.  Returns the rewritten message text
    and the combined list of new + old entities.  The text is converted
    with add_surrogate() before matching and del_surrogate() at the end
    (presumably so entity offsets count UTF-16 code units as Telegram
    expects — confirm against telethon's docs).
    """
    entities = []
    # Process old entities in offset order so a single forward cursor
    # (`after`) can track which of them we've already passed.
    old_entities = sorted(old_entities or [], key=lambda e: e.offset)

    i = 0       # current scan position in `message`
    after = 0   # index of the first old entity not yet behind position i
    message = add_surrogate(message)
    while i < len(message):
        # Advance past any old entities that start at or before `i`.
        for after, e in enumerate(old_entities[after:], start=after):
            # If the next entity is strictly to our right, we're done here
            if i < e.offset:
                break
            # Skip already existing entities if we're at one
            if i == e.offset:
                i += e.length
        else:
            # Loop exhausted without break: every old entity is behind
            # us, so point `after` one past the last one.
            after += 1

        # Find the first pattern that matches
        for pattern, parser in MATCHERS:
            match = pattern.match(message, pos=i)
            if match:
                break
        else:
            # No matcher applies here; move one character forward.
            i += 1
            continue

        text, entity = parser(match)

        # Shift old entities after our current position (so they stay in place)
        shift = len(text) - len(match[0])
        if shift:
            for e in old_entities[after:]:
                e.offset += shift

        # Replace whole match with text from parser
        message = ''.join((
            message[:match.start()],
            text,
            message[match.end():]
        ))

        # Append entity if we got one
        if entity:
            entities.append(entity)

        # Skip past the match
        i += len(text)

    return del_surrogate(message), entities + old_entities
@borg.on(events.MessageEdited(outgoing=True))
@borg.on(events.NewMessage(outgoing=True))
async def reparse(event):
    """Re-parse outgoing messages with the custom MATCHERS and, when the
    text or entities actually change, edit the message in place."""
    old_entities = event.message.entities or []
    custom_parser = partial(parse, old_entities=old_entities)
    message, msg_entities = await borg._parse_message_text(
        event.raw_text, custom_parser)

    # No new entities and identical text: nothing to edit.
    unchanged = (len(old_entities) >= len(msg_entities)
                 and event.raw_text == message)
    if unchanged:
        return

    await borg(EditMessageRequest(
        peer=await event.get_input_chat(),
        id=event.message.id,
        message=message,
        # Keep the preview only when the message already has media.
        no_webpage=not bool(event.message.media),
        entities=msg_entities
    ))
    raise events.StopPropagation