
Restructure to use subclassing

Jon Michael Aanes 2024-03-03 16:59:03 +01:00
parent a53c1c381d
commit a03ba73dcd
6 changed files with 168 additions and 144 deletions

README.md

@@ -5,7 +5,10 @@ This is a collection of small fetchers for personal data spread around the internet
 ## Ideas for more fetchers
+- [ ] Final Fantasy XIV: Achievements & Mounts
 - [ ] Save data for Theatherythm: Most played songs, and liked songs.
 - [ ] YouTube (Music): Liked videos with title and URL.
 - [ ] Playstation: Achivement dates, hours played and last played dates
+- [ ] Steam Wishlist and Achievements
+- [ ] fredagscafeen.dk

View File

@@ -19,13 +19,6 @@ import personal_data.data
 CSV_DIALECT = 'one_true_dialect'
 csv.register_dialect(CSV_DIALECT, lineterminator = '\n', skipinitialspace = True)
 
-def determine_scrapers():
-    scrapers = []
-    #scrapers += personal_data.fetchers.playstation.SCRAPERS
-    scrapers += personal_data.fetchers.crunchyroll.SCRAPERS
-    scrapers += personal_data.fetchers.psnprofiles.SCRAPERS
-    return scrapers
-
 def try_value(fn, s: str) -> any:
     try:
         return fn(s)
@@ -111,15 +104,18 @@ def main():
     for cookie in cookiejar:
         session.cookies.set_cookie(cookie)
 
-    for scraper in determine_scrapers():
+    for scraper_cls in personal_data.data.Scraper.__subclasses__():
+        scraper = scraper_cls(session)
+        del scraper_cls
         logger.warning('Running scraper: %s', scraper.dataset_name)
         result_rows = list()
-        for result in scraper.scraper(session):
+        for result in scraper.scrape():
            result_rows.append(result)
            del result
        extend_csv_file('output/'+scraper.dataset_name, result_rows,
                deduplicate_mode = scraper.deduplicate_mode)
        logger.warning('Scraper done: %s', scraper.dataset_name)
+        del scraper
 
 if __name__ == '__main__':
     main()
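The main loop now discovers scrapers through Scraper.__subclasses__() instead of the hand-maintained determine_scrapers() list. One caveat worth noting: __subclasses__() only sees classes whose defining modules have already been imported, so the fetcher modules must still be imported somewhere. A minimal sketch of the new discovery loop, assuming the module layout shown in this commit (run_scrapers is a hypothetical helper name):

import requests

import personal_data.data
# __subclasses__() only returns classes that have actually been defined,
# i.e. whose modules were imported; these imports are load-bearing.
import personal_data.fetchers.crunchyroll
import personal_data.fetchers.psnprofiles

def run_scrapers(session: requests.Session) -> None:
    for scraper_cls in personal_data.data.Scraper.__subclasses__():
        scraper = scraper_cls(session)  # the dataclass __init__ takes only the session
        rows = list(scraper.scrape())   # scrape() yields one dict per result row
        print(scraper.dataset_name, len(rows))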

personal_data/data.py

@@ -1,15 +1,35 @@
 import dataclasses
+import requests
 from enum import Enum
+import abc
 
 class DeduplicateMode(Enum):
     NONE = 0
     BY_FIRST_COLUMN = 1
     BY_ALL_COLUMNS = 2
 
-@dataclasses.dataclass
-class Scraper:
-    scraper: object # TODO: Callable
-    dataset_name: str
-    deduplicate_mode: DeduplicateMode
-    dataset_format: str = 'list-of-dicts'
+@dataclasses.dataclass(frozen = True)
+class Scraper(abc.ABC):
+    session: requests.Session
+
+    @staticmethod
+    @property
+    @abc.abstractmethod
+    def dataset_name(self) -> str:
+        pass
+
+    @staticmethod
+    @property
+    @abc.abstractmethod
+    def deduplicate_mode(self) -> DeduplicateMode:
+        pass
+
+    @staticmethod
+    @property
+    def dataset_format(self) -> str:
+        return 'list-of-dicts'
+
+    @abc.abstractmethod
+    def scrape(self):
+        pass
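With the dataclass turned into an abstract base class, a new fetcher only has to supply two identifying values and a scrape() generator. A caveat: stacking @staticmethod and @property does not produce a working class-level property on current CPython, but the pattern still functions here because every concrete subclass shadows the names with plain class attributes, which also satisfies the abc abstractness check. A minimal sketch of a conforming subclass (ExampleScraper and its values are hypothetical, for illustration only):

import dataclasses
from personal_data.data import Scraper, DeduplicateMode

@dataclasses.dataclass(frozen = True)
class ExampleScraper(Scraper):  # hypothetical subclass
    # Plain class attributes (no type annotations), so dataclasses does not
    # treat them as fields, and they shadow the abstract properties above.
    dataset_name = 'example_dataset'
    deduplicate_mode = DeduplicateMode.NONE

    def scrape(self):
        yield {'example.key': 'example value'}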

personal_data/fetchers/crunchyroll.py

@@ -1,6 +1,7 @@
 import secrets
 import functools
 import logging
+import dataclasses
 
 from personal_data.data import Scraper, DeduplicateMode
@@ -11,7 +12,12 @@ API_URL_TOKEN = API_ROOT + '/auth/v1/token'
 API_URL_ME = API_ROOT + '/accounts/v1/me'
 API_URL_WATCH_LIST = API_ROOT + '/content/v2/{account_uuid}/watch-history?page_size=100&locale=en-US'
 
-def scrape_watched_last(session):
+@dataclasses.dataclass(frozen = True)
+class CrunchyrollScraper(Scraper):
+    dataset_name = 'episodes_watched_crunchyroll'
+    deduplicate_mode = DeduplicateMode.BY_ALL_COLUMNS
+
+    def scrape(self):
         headers = {
             'Referer': 'https://www.crunchyroll.com/history',
             'Authorization': secrets.CRUNCHYROLL_AUTH, # TODO: Determine automatically
@@ -19,7 +25,7 @@ def scrape_watched_last(session):
 
         # Request to get account UUID
         logger.info('Getting Access Token')
-        response = session.post(API_URL_TOKEN, headers = headers, cookies = session.cookies, data = {
+        response = self.session.post(API_URL_TOKEN, headers = headers, cookies = self.session.cookies, data = {
             "device_id": secrets.CRUNCHYROLL_DEVICE_ID, # TODO: Determine automatically
             "device_type": "Firefox on Linux",
             "grant_type": "etp_rt_cookie"
@@ -34,7 +40,7 @@ def scrape_watched_last(session):
 
         # Request to get watch history
         logger.info('Getting Watchlist')
-        response = session.get(API_URL_WATCH_LIST.format(account_uuid = account_uuid), headers = headers)
+        response = self.session.get(API_URL_WATCH_LIST.format(account_uuid = account_uuid), headers = headers)
         response.raise_for_status()
 
         # Parse data
@@ -64,7 +70,3 @@ def scrape_watched_last(session):
             'series.crunchyroll_id': episode_data['parent_id'],
         }
-
-SCRAPERS = [
-    Scraper(scrape_watched_last, 'episodes_watched_crunchyroll',
-        deduplicate_mode = DeduplicateMode.BY_ALL_COLUMNS)
-]
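Because dataset_name and deduplicate_mode are assigned without type annotations, dataclasses does not count them as fields: the generated __init__ accepts only the session field inherited from Scraper. A brief usage sketch (the scrape() call would need real Crunchyroll credentials in secrets):

import requests
from personal_data.fetchers.crunchyroll import CrunchyrollScraper

session = requests.Session()           # in the real script this carries login cookies
scraper = CrunchyrollScraper(session)  # only the inherited session field
assert scraper.dataset_name == 'episodes_watched_crunchyroll'
rows = list(scraper.scrape())          # generator of watch-history row dicts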

personal_data/fetchers/playstation.py

@@ -57,8 +57,10 @@ def scrape_played_last(session):
         'game.icon': game_data['image']['url'],
     }
 
+'''
 SCRAPERS = [
     Scraper(scrape_played_last, 'games_played_playstation',
         deduplicate_mode = DeduplicateMode.BY_ALL_COLUMNS)
 ]
+'''
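The playstation fetcher keeps its old function and only comments out the old-style SCRAPERS registration (a triple-quoted string used as a block comment), so it stays disabled until converted. A hypothetical conversion in the style of the other fetchers, not part of this commit, might look like:

import dataclasses
from personal_data.data import Scraper, DeduplicateMode

@dataclasses.dataclass(frozen = True)
class PlaystationScraper(Scraper):  # hypothetical; the commit leaves this fetcher disabled
    dataset_name = 'games_played_playstation'
    deduplicate_mode = DeduplicateMode.BY_ALL_COLUMNS

    def scrape(self):
        # Delegate to the existing module-level generator function.
        yield from scrape_played_last(self.session)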

personal_data/fetchers/psnprofiles.py

@@ -1,5 +1,6 @@
 import secrets
 import functools
+import dataclasses
 import re
 import logging
 import bs4
@@ -26,11 +27,16 @@ assert game_psnprofiles_id_from_url('/trophies/21045-theatrhythm-final-bar-line/
 assert game_psnprofiles_id_from_url('/trophies/21045-theatrhythm-final-bar-line/Jmaanes')
 assert game_psnprofiles_id_from_url('/trophy/21045-theatrhythm-final-bar-line/19-seasoned-hunter')
 
-def scrape_personal_page(session):
+@dataclasses.dataclass(frozen = True)
+class PsnProfilesScraper(Scraper):
+    dataset_name = 'games_played_playstation'
+    deduplicate_mode = DeduplicateMode.BY_ALL_COLUMNS
+
+    def scrape(self):
         # Request to get watch history
         logger.info('Getting Watchlist')
         url = URL_PROFILE.format(psn_id = secrets.PLAYSTATION_PSN_ID)
-        response = session.get(url)
+        response = self.session.get(url)
         response.raise_for_status()
 
         NOW = datetime.datetime.strptime(response.headers['Date'], FORMAT_DATE_HEADER)
@@ -107,8 +113,3 @@ def scrape_personal_page(session):
         if time_played:
             d['me.last_played_time'] = time_played
         yield d
-
-SCRAPERS = [
-    Scraper(scrape_personal_page, 'games_played_playstation',
-        deduplicate_mode = DeduplicateMode.BY_ALL_COLUMNS)
-]
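After this commit, the two converted fetchers are picked up automatically by the main loop. A quick check of the discovery mechanism (a sketch, assuming both fetcher modules shown above):

import personal_data.fetchers.crunchyroll
import personal_data.fetchers.psnprofiles
from personal_data.data import Scraper

# Both concrete subclasses are now visible to the main loop:
print([cls.__name__ for cls in Scraper.__subclasses__()])
# expected: ['CrunchyrollScraper', 'PsnProfilesScraper']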