Restructure to use subclassing

Jon Michael Aanes 2024-03-03 16:59:03 +01:00
parent a53c1c381d
commit a03ba73dcd
6 changed files with 168 additions and 144 deletions

README.md (View File)

@@ -5,7 +5,10 @@ This is a collection of small fetchers for personal data spread around the internet
## Ideas for more fetchers
- [ ] Final Fantasy XIV: Achievements & Mounts
- [ ] Save data for Theatrhythm: Most played songs and liked songs
- [ ] YouTube (Music): Liked videos with title and URL.
- [ ] PlayStation: Achievement dates, hours played and last played dates
- [ ] Steam Wishlist and Achievements
- [ ] fredagscafeen.dk

View File

@@ -19,13 +19,6 @@ import personal_data.data

CSV_DIALECT = 'one_true_dialect'
csv.register_dialect(CSV_DIALECT, lineterminator = '\n', skipinitialspace = True)

def determine_scrapers():
    scrapers = []
    #scrapers += personal_data.fetchers.playstation.SCRAPERS
    scrapers += personal_data.fetchers.crunchyroll.SCRAPERS
    scrapers += personal_data.fetchers.psnprofiles.SCRAPERS
    return scrapers

def try_value(fn, s: str) -> any:
    try:
        return fn(s)
@@ -111,15 +104,18 @@ def main():
    for cookie in cookiejar:
        session.cookies.set_cookie(cookie)

    for scraper in determine_scrapers():
    for scraper_cls in personal_data.data.Scraper.__subclasses__():
        scraper = scraper_cls(session)
        del scraper_cls
        logger.warning('Running scraper: %s', scraper.dataset_name)
        result_rows = list()
        for result in scraper.scraper(session):
        for result in scraper.scrape():
            result_rows.append(result)
            del result
        extend_csv_file('output/'+scraper.dataset_name, result_rows,
                deduplicate_mode = scraper.deduplicate_mode)
        logger.warning('Scraper done: %s', scraper.dataset_name)
        del scraper

if __name__ == '__main__':
    main()
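
Worth flagging for reviewers: `Scraper.__subclasses__()` only sees classes whose defining modules have already been imported, so with `determine_scrapers()` gone, the module imports themselves become the registration mechanism. A minimal, self-contained sketch of the discovery pattern (the FooScraper class is hypothetical, not part of this repository):

import abc

class Scraper(abc.ABC):
    @abc.abstractmethod
    def scrape(self):
        ...

# Merely defining the subclass registers it, but only once the module
# containing it has actually been imported.
class FooScraper(Scraper):  # hypothetical example
    def scrape(self):
        yield {'example': 'row'}

for scraper_cls in Scraper.__subclasses__():
    scraper = scraper_cls()
    for row in scraper.scrape():
        print(row)  # prints {'example': 'row'}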

personal_data/data.py (View File)

@@ -1,15 +1,35 @@
import dataclasses
import requests
from enum import Enum
import abc

class DeduplicateMode(Enum):
    NONE = 0
    BY_FIRST_COLUMN = 1
    BY_ALL_COLUMNS = 2

@dataclasses.dataclass
class Scraper:
    scraper: object # TODO: Callable
    dataset_name: str
    deduplicate_mode: DeduplicateMode
    dataset_format: str = 'list-of-dicts'

@dataclasses.dataclass(frozen = True)
class Scraper(abc.ABC):
    session: requests.Session

    @staticmethod
    @property
    @abc.abstractmethod
    def dataset_name(self) -> str:
        pass

    @staticmethod
    @property
    @abc.abstractmethod
    def deduplicate_mode(self) -> DeduplicateMode:
        pass

    @staticmethod
    @property
    def dataset_format(self) -> str:
        return 'list-of-dicts'

    @abc.abstractmethod
    def scrape(self):
        pass
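
The `@staticmethod` / `@property` / `@abc.abstractmethod` stack deserves a comment: Python has no built-in abstract class attribute, and this stack is a common workaround that declares one. Both `staticmethod` and `property` forward `__isabstractmethod__`, so the ABC machinery refuses to instantiate a subclass until the name is overridden, and the fetcher files below override it with a plain class attribute rather than a real property. One caveat, as far as I can tell: the non-abstract `dataset_format` default does not behave like a property, since attribute access through the `staticmethod` wrapper returns the wrapped `property` object itself rather than calling it, so a subclass relying on the default would read a `property` instance instead of 'list-of-dicts'. A minimal sketch of the abstract-attribute pattern, with hypothetical names:

import abc

class Base(abc.ABC):
    @staticmethod
    @property
    @abc.abstractmethod
    def name(self) -> str:
        pass

class Impl(Base):
    name = 'impl'  # a plain class attribute satisfies the abstract declaration

# Base() would raise TypeError: abstract attribute 'name' is not overridden.
print(Impl().name)  # prints: impl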

personal_data/fetchers/crunchyroll.py (View File)

@@ -1,6 +1,7 @@
import secrets
import functools
import logging
import dataclasses
from personal_data.data import Scraper, DeduplicateMode
@@ -11,7 +12,12 @@ API_URL_TOKEN = API_ROOT + '/auth/v1/token'
API_URL_ME = API_ROOT + '/accounts/v1/me'
API_URL_WATCH_LIST = API_ROOT + '/content/v2/{account_uuid}/watch-history?page_size=100&locale=en-US'

def scrape_watched_last(session):
@dataclasses.dataclass(frozen = True)
class CrunchyrollScraper(Scraper):
    dataset_name = 'episodes_watched_crunchyroll'
    deduplicate_mode = DeduplicateMode.BY_ALL_COLUMNS

    def scrape(self):
        headers = {
            'Referer': 'https://www.crunchyroll.com/history',
            'Authorization': secrets.CRUNCHYROLL_AUTH, # TODO: Determine automatically
@@ -19,7 +25,7 @@ def scrape_watched_last(session):
        # Request to get account UUID
        logger.info('Getting Access Token')
        response = session.post(API_URL_TOKEN, headers = headers, cookies = session.cookies, data = {
        response = self.session.post(API_URL_TOKEN, headers = headers, cookies = self.session.cookies, data = {
            "device_id": secrets.CRUNCHYROLL_DEVICE_ID, # TODO: Determine automatically
            "device_type": "Firefox on Linux",
            "grant_type": "etp_rt_cookie"
@@ -34,7 +40,7 @@ def scrape_watched_last(session):
        # Request to get watch history
        logger.info('Getting Watchlist')
        response = session.get(API_URL_WATCH_LIST.format(account_uuid = account_uuid), headers = headers)
        response = self.session.get(API_URL_WATCH_LIST.format(account_uuid = account_uuid), headers = headers)
        response.raise_for_status()

        # Parse data
@@ -64,7 +70,3 @@ def scrape_watched_last(session):
                'series.crunchyroll_id': episode_data['parent_id'],
            }

SCRAPERS = [
    Scraper(scrape_watched_last, 'episodes_watched_crunchyroll',
        deduplicate_mode = DeduplicateMode.BY_ALL_COLUMNS)
]
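
With the module-level SCRAPERS list gone, running this fetcher by hand now means constructing the class with a session. A minimal usage sketch, assuming the secrets module is filled in with valid Crunchyroll credentials and the session already carries the needed cookies:

import requests
from personal_data.fetchers.crunchyroll import CrunchyrollScraper

session = requests.Session()  # cookie/auth setup omitted here
scraper = CrunchyrollScraper(session)
for episode in scraper.scrape():
    print(episode['series.crunchyroll_id'])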

personal_data/fetchers/playstation.py (View File)

@@ -57,8 +57,10 @@ def scrape_played_last(session):
            'game.icon': game_data['image']['url'],
        }
'''
SCRAPERS = [
    Scraper(scrape_played_last, 'games_played_playstation',
        deduplicate_mode = DeduplicateMode.BY_ALL_COLUMNS)
]
'''

personal_data/fetchers/psnprofiles.py (View File)

@@ -1,5 +1,6 @@
import secrets
import functools
import dataclasses
import re
import logging
import bs4
@@ -26,11 +27,16 @@ assert game_psnprofiles_id_from_url('/trophies/21045-theatrhythm-final-bar-line/
assert game_psnprofiles_id_from_url('/trophies/21045-theatrhythm-final-bar-line/Jmaanes')
assert game_psnprofiles_id_from_url('/trophy/21045-theatrhythm-final-bar-line/19-seasoned-hunter')

def scrape_personal_page(session):
@dataclasses.dataclass(frozen = True)
class PsnProfilesScraper(Scraper):
    dataset_name = 'games_played_playstation'
    deduplicate_mode = DeduplicateMode.BY_ALL_COLUMNS

    def scrape(self):
        # Request to get watch history
        logger.info('Getting Watchlist')
        url = URL_PROFILE.format(psn_id = secrets.PLAYSTATION_PSN_ID)
        response = session.get(url)
        response = self.session.get(url)
        response.raise_for_status()

        NOW = datetime.datetime.strptime(response.headers['Date'], FORMAT_DATE_HEADER)
@@ -107,8 +113,3 @@ def scrape_personal_page(session):
        if time_played:
            d['me.last_played_time'] = time_played
        yield d

SCRAPERS = [
    Scraper(scrape_personal_page, 'games_played_playstation',
        deduplicate_mode = DeduplicateMode.BY_ALL_COLUMNS)
]
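
A quick sanity check of the whole restructuring is to import the fetcher modules and confirm that discovery picks both classes up; the PlayStation fetcher stays commented out and therefore contributes no subclass. A sketch, assuming the package imports cleanly:

import personal_data.data
import personal_data.fetchers.crunchyroll
import personal_data.fetchers.psnprofiles

names = {cls.__name__ for cls in personal_data.data.Scraper.__subclasses__()}
assert 'CrunchyrollScraper' in names
assert 'PsnProfilesScraper' in names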