# personal-data/personal_data/fetchers/psnprofiles.py

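"""Scraper for PSNProfiles (https://psnprofiles.com/).

Fetches the personal profile page and yields rows for recently earned
trophies and for the entries in the games table.
"""
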
import datetime
import logging
import re

import bs4

import secrets  # project-local module providing PLAYSTATION_PSN_ID
import personal_data.html_util
import personal_data.parse_util
from personal_data.data import Scraper, DeduplicateMode

logger = logging.getLogger(__name__)

URL_PROFILE = 'https://psnprofiles.com/{psn_id}'
FORMAT_DATE_HEADER = '%a, %d %b %Y %H:%M:%S GMT'
FORMAT_DAY_MONTH_YEAR = '%d %B %Y'
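

# PSNProfiles game URLs look like '/trophies/<id>-<slug>[/...]' or
# '/trophy/<id>-<slug>/<trophy-slug>'; the leading number is the game id.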
def game_psnprofiles_id_from_url(relative_url: str) -> int:
    m = re.match(r'/(?:trophy|trophies)/(\d+)\-(?:[\w-]+)(/[\w-]*)?', relative_url)
    assert m is not None, relative_url
    return int(m.group(1))

assert game_psnprofiles_id_from_url('/trophies/21045-theatrhythm-final-bar-line')
assert game_psnprofiles_id_from_url('/trophies/21045-theatrhythm-final-bar-line/')
assert game_psnprofiles_id_from_url('/trophies/21045-theatrhythm-final-bar-line/Jmaanes')
assert game_psnprofiles_id_from_url('/trophy/21045-theatrhythm-final-bar-line/19-seasoned-hunter')
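

# Yields one dict per recently earned trophy, then one dict per entry in
# the profile's games table.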
def scrape_personal_page(session):
    # Request the profile page
    logger.info('Getting PSN profile')
    url = URL_PROFILE.format(psn_id=secrets.PLAYSTATION_PSN_ID)
    response = session.get(url)
    response.raise_for_status()

    # Trophy timestamps on the page are relative ('3 days ago'), so anchor
    # them to the server time from the response's Date header.
    now = datetime.datetime.strptime(response.headers['Date'], FORMAT_DATE_HEADER)

    # Parse data
    soup = bs4.BeautifulSoup(response.content, 'lxml')
    soup = personal_data.html_util.normalize_soup_slightly(soup, classes=False)

    # Recent trophies.
    soup_recent_trophies = soup.select('ul#recent-trophies > li')
    assert len(soup_recent_trophies) > 0, url
    for row in soup_recent_trophies:
        cells = row.select_one('.info .box td').find_all('div')

        trophy_name = cells[0].get_text().strip()
        trophy_desc = cells[1].get_text().strip()
        game_name = cells[2].a.extract().get_text().strip()
        psnprofiles_id = game_psnprofiles_id_from_url(cells[0].find('a')['href'])
        trophy_icon = row.find(class_='icon').find('img')['src']

        gotten_at = cells[2].get_text().strip().removesuffix(' in').removesuffix(' ago')
        gotten_at = personal_data.parse_util.parse_duration(gotten_at)
        time_acquired = now - gotten_at

        yield {
            'game.name': game_name,
            'me.last_played_time': time_acquired,
            # Trophy Data
            'trophy.name': trophy_name,
            'trophy.desc': trophy_desc,
            'trophy.icon': trophy_icon,
            'psnprofiles.game_id': psnprofiles_id,
        }
    del row, cells, time_acquired

    # Games table
    table_rows = soup.find(id='gamesTable').find_all('tr')
    assert len(table_rows) > 0, url

    for row in table_rows:
        cells = row.find_all('td')

        # Check for pagination
        if re.match(r'show \d+ more games', cells[0].get_text().strip(), re.IGNORECASE):
            break

        psnprofiles_id = game_psnprofiles_id_from_url(cells[0].find('a')['href'])
        game_icon = cells[0].find('img')['src']
        game_name = row.select_one('.title').get_text()
        game_platform = row.select_one('.platform').get_text()

        # The third small info <div>, when present, holds the last-played date.
        small_infos = cells[1].find_all('div')
        if len(small_infos) > 2:
            time_played_div = small_infos[2]
            time_played_div.sup.extract()
            time_played = datetime.datetime.strptime(
                time_played_div.get_text().strip(),
                FORMAT_DAY_MONTH_YEAR,
            ).date()
        else:
            time_played = None

        d = {
            # Important fields
            'game.name': game_name,
            # Secondary fields
            'game.platform': game_platform,
            'game.icon': game_icon,
            'psnprofiles.game_id': psnprofiles_id,
        }
        if time_played:
            d['me.last_played_time'] = time_played
        yield d


SCRAPERS = [
    Scraper(scrape_personal_page, 'games_played_playstation',
            deduplicate_mode=DeduplicateMode.BY_ALL_COLUMNS),
]
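
# A minimal usage sketch, assuming a plain requests.Session and a
# configured secrets.PLAYSTATION_PSN_ID; the surrounding framework
# normally drives SCRAPERS, so this only illustrates the generator
# interface of scrape_personal_page.
if __name__ == '__main__':
    import requests

    logging.basicConfig(level=logging.INFO)
    with requests.Session() as session:
        for sample in scrape_personal_page(session):
            print(sample)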