1
0

Compare commits

..

No commits in common. "2f6dc95a7f3dfe74aeef9fcfb806c7c1a20b1d7d" and "9dd51ac01226fec78a7c3c3d85f05445c98e919d" have entirely different histories.

4 changed files with 12 additions and 38 deletions

View File

@@ -15,7 +15,6 @@ PACKAGE_NAME = 'socials_util'
with open('README.md') as f: with open('README.md') as f:
readme = f.read() readme = f.read()
def parse_version_file(text: str) -> str: def parse_version_file(text: str) -> str:
match = re.match(r'^__version__\s*=\s*(["\'])([\d\.]+)\1$', text) match = re.match(r'^__version__\s*=\s*(["\'])([\d\.]+)\1$', text)
if match is None: if match is None:
@@ -23,11 +22,9 @@ def parse_version_file(text: str) -> str:
raise Exception(msg) raise Exception(msg)
return match.group(2) return match.group(2)
with open(PACKAGE_NAME + '/_version.py') as f: with open(PACKAGE_NAME + '/_version.py') as f:
version = parse_version_file(f.read()) version = parse_version_file(f.read())
def parse_requirements(text: str) -> list[str]: def parse_requirements(text: str) -> list[str]:
return text.strip().split('\n') return text.strip().split('\n')

View File

@@ -188,17 +188,12 @@ WIKIDATA_PROPERTIES: dict[SocialSiteId | int, WikidataInfo] = {
def re_social_subdomain(main_domain: str) -> str: def re_social_subdomain(main_domain: str) -> str:
return ( return r'^(?:https?:\/\/)?([\w_-]+)\.' + re.escape(main_domain) + r'(\/.*)?$'
r'^(?:https?:\/\/)?(?:www\.)?([\w_-]+)\.' + re.escape(main_domain) + r'(\/.*)?$'
)
RE_ID = r'@?([^\s/]+)' RE_ID = r'@?([^/]+)'
RE_DUAL_ID = r'@?([^\s/]+/[^\s/]+)' RE_DUAL_ID = r'@?([^/]+/[^/]+)'
RE_ANY_SUBPATH = r'(|\/|\/\S*)$' RE_ANY_SUBPATH = r'(|\/|\/.*)$'
SPECIAL_REGEX_LITERALS = frozenset({RE_ID, RE_DUAL_ID, RE_ANY_SUBPATH})
DOES_NOT_NEED_AUTO_SLASH = frozenset({RE_ANY_SUBPATH})
def re_social_path(main_domain: str) -> str: def re_social_path(main_domain: str) -> str:
@@ -217,13 +212,12 @@ def re_social_path_adv(main_domain: str, *path: str) -> str:
] ]
for p in path: for p in path:
if p not in DOES_NOT_NEED_AUTO_SLASH: if p != RE_ANY_SUBPATH:
regex_builder.append(r'\/') regex_builder.append(r'\/')
regex_builder.append( regex_builder.append(
p if p in SPECIAL_REGEX_LITERALS else re.escape(p), p if p in {RE_ID, RE_DUAL_ID, RE_ANY_SUBPATH} else re.escape(p),
) )
del p if path[-1] != RE_ANY_SUBPATH:
if path[-1] not in DOES_NOT_NEED_AUTO_SLASH:
regex_builder.append(r'\/?$') regex_builder.append(r'\/?$')
return ''.join(regex_builder) return ''.join(regex_builder)
@@ -266,9 +260,7 @@ PIXIV_SKETCH_USER_NICKNAME_URL = re_social_path_adv('sketch.pixiv.net', RE_ID)
URL_PARSE_CARRD_PAGE = re_social_subdomain('carrd.co') URL_PARSE_CARRD_PAGE = re_social_subdomain('carrd.co')
URL_PARSE_YOUTUBE_CHANNEL_HANDLE_1 = re_social_path_adv( URL_PARSE_YOUTUBE_CHANNEL_HANDLE_1 = re_social_path_adv(
'youtube.com', 'youtube.com', RE_ID, RE_ANY_SUBPATH
RE_ID,
RE_ANY_SUBPATH,
) )
URL_PARSE_YOUTUBE_CHANNEL_HANDLE_2 = re_social_path_adv('youtube.com', 'c', RE_ID) URL_PARSE_YOUTUBE_CHANNEL_HANDLE_2 = re_social_path_adv('youtube.com', 'c', RE_ID)
URL_PARSE_YOUTUBE_CHANNEL_ID = re_social_path_adv('youtube.com', 'channel', RE_ID) URL_PARSE_YOUTUBE_CHANNEL_ID = re_social_path_adv('youtube.com', 'channel', RE_ID)
@@ -421,7 +413,7 @@ WELL_KNOWN_MASTODON_INSTANCES: frozenset[str] = frozenset(
}, },
) )
DISALLOWED_IDENTIFIERS: frozenset[str] = frozenset({'www', 'intent', 'user'}) DISALLOWED_IDENTIFIERS: frozenset[str] = frozenset({'www'})
def determine_social_from_url_internally( def determine_social_from_url_internally(

View File

@@ -1,11 +1,10 @@
import pytest import pytest
from socials_util import SocialLink, SocialSiteId, determine_social_from_url from socials_util import *
PARSABLE_SOCIAL_IDS_COMBINED: list[tuple[str, object, str]] = [ PARSABLE_SOCIAL_IDS_COMBINED: list[tuple[str, object, str]] = [
# Tumblr formats # Tumblr formats
('https://triviallytrue.tumblr.com/', SocialSiteId.TUMBLR, 'triviallytrue'), ('https://triviallytrue.tumblr.com/', SocialSiteId.TUMBLR, 'triviallytrue'),
('https://www.triviallytrue.tumblr.com/', SocialSiteId.TUMBLR, 'triviallytrue'),
('https://tumblr.com/triviallytrue', SocialSiteId.TUMBLR, 'triviallytrue'), ('https://tumblr.com/triviallytrue', SocialSiteId.TUMBLR, 'triviallytrue'),
('https://tumblr.com/blog/triviallytrue', SocialSiteId.TUMBLR, 'triviallytrue'), ('https://tumblr.com/blog/triviallytrue', SocialSiteId.TUMBLR, 'triviallytrue'),
( (
@@ -180,18 +179,9 @@ PARSABLE_SOCIAL_IDS_COMBINED: list[tuple[str, object, str]] = [
('https://solquiet.deviantart.com/', SocialSiteId.DEVIANT_ART_ACCOUNT, 'solquiet'), ('https://solquiet.deviantart.com/', SocialSiteId.DEVIANT_ART_ACCOUNT, 'solquiet'),
] ]
NOT_PARSABLE = [
# Twitter intents are not supported
'twitter.com/intent/user?user_id=123',
'https://twitter.com/intent/user?user_id=123',
'https://twitter.com/intent/user',
'https://twitter.com/intent',
]
@pytest.mark.parametrize( @pytest.mark.parametrize(
'url,expected_social_site_id,expected_social_id', 'url,expected_social_site_id,expected_social_id', PARSABLE_SOCIAL_IDS_COMBINED
PARSABLE_SOCIAL_IDS_COMBINED,
) )
def test_parse_social_ids(url, expected_social_site_id, expected_social_id): def test_parse_social_ids(url, expected_social_site_id, expected_social_id):
social_link: SocialLink | None = determine_social_from_url(url) social_link: SocialLink | None = determine_social_from_url(url)
@@ -200,8 +190,3 @@ def test_parse_social_ids(url, expected_social_site_id, expected_social_id):
expected_social_id, expected_social_id,
expected_social_site_id, expected_social_site_id,
), url ), url
@pytest.mark.parametrize('url', NOT_PARSABLE)
def test_not_parsable(url: str):
assert determine_social_from_url(url) is None