# NOTE(review): the following header is web-UI scrape metadata, not code —
# preserved as a comment so the file remains valid Python:
#   author: Jon Michael Aanes, commit a53c1c381d
#   CI: "Python Package / Package (push)" — skipped, all checks successful
#   file stats: 127 lines, 3.8 KiB, Python
|
|
import csv
import datetime
import io
import logging
from typing import Any, Callable

import browsercookie
import cfscrape
import requests
import requests_cache
from frozendict import frozendict

import personal_data.data
import personal_data.fetchers.crunchyroll
import personal_data.fetchers.playstation
import personal_data.fetchers.psnprofiles

logger = logging.getLogger(__name__)
|
|
|
|
# One shared CSV dialect so reading and writing agree on formatting.
CSV_DIALECT = 'one_true_dialect'
csv.register_dialect(CSV_DIALECT, lineterminator='\n', skipinitialspace=True)
|
|
|
|
def determine_scrapers():
    """Return the list of scraper definitions to run.

    The PlayStation scrapers are intentionally disabled at the moment.
    """
    # personal_data.fetchers.playstation.SCRAPERS is deliberately left out.
    return (
        list(personal_data.fetchers.crunchyroll.SCRAPERS)
        + list(personal_data.fetchers.psnprofiles.SCRAPERS)
    )
|
|
|
|
def try_value(fn: Callable[[str], Any], s: str) -> Any:
    """Apply *fn* to *s*, returning ``None`` if *fn* raises ``ValueError``.

    Used for speculative parsing (``int``, ISO dates, ...) of CSV cell text.
    Note: only ``ValueError`` is swallowed; other exceptions propagate.
    """
    try:
        return fn(s)
    except ValueError:
        return None
|
|
|
|
def to_value(s: str) -> Any:
    """Parse CSV cell text *s* into a typed Python value.

    Tries, in order: int, ISO date, ISO datetime, then the literals
    'false'/'true'/'none' (case-insensitive). Falls back to returning the
    stripped string itself. Empty/whitespace-only input yields ``None``.
    """
    s = s.strip()
    if not s:
        return None
    # Return directly from inside the try so falsy-but-valid results are kept.
    # (The previous `if v := try_value(...)` pattern silently dropped int 0,
    # returning the string '0' instead.)
    for parse in (int, datetime.date.fromisoformat, datetime.datetime.fromisoformat):
        try:
            return parse(s)
        except ValueError:
            pass
    lowered = s.lower()
    if lowered == 'false':
        return False
    if lowered == 'true':
        return True
    if lowered == 'none':
        return None
    return s
|
|
|
|
def _load_csv_dicts(filename: str) -> list:
    """Read rows of *filename* as frozendicts with typed, non-empty values.

    Returns ``[]`` when the file does not exist yet (first run).
    """
    dicts = []
    try:
        with open(filename, 'r') as csvfile:
            for row in csv.DictReader(csvfile, dialect=CSV_DIALECT):
                # Re-parse cell text into typed values and drop empty cells so
                # rows hash/compare on meaningful fields only.
                typed = {k: to_value(v) for k, v in row.items()}
                dicts.append(frozendict({k: v for k, v in typed.items() if v is not None}))
    except FileNotFoundError:
        logger.info('Creating file: %s', filename)
    return dicts


def _ordered_fieldnames(dicts: list) -> list:
    """Union of all row keys, in first-seen order (becomes the CSV columns)."""
    fieldnames = []
    for d in dicts:
        for k in d:
            if k not in fieldnames:
                fieldnames.append(k)
    return fieldnames


def extend_csv_file(filename: str, new_dicts: list, deduplicate_mode: personal_data.data.DeduplicateMode):
    """Merge *new_dicts* into the CSV at *filename* and rewrite it in full.

    Existing rows are re-read and combined with the new ones; unless
    *deduplicate_mode* is ``DeduplicateMode.NONE``, exact duplicates are
    removed and rows are sorted for a deterministic on-disk layout.
    """
    dicts = _load_csv_dicts(filename)
    original_num_dicts = len(dicts)
    dicts += [frozendict(d) for d in new_dicts]

    fieldnames = _ordered_fieldnames(dicts)

    if deduplicate_mode != personal_data.data.DeduplicateMode.NONE:
        dicts = sorted(
            set(dicts),
            key=lambda d: tuple(str(d.get(fn, '')) for fn in fieldnames),
        )

    # Serialize to memory first so the existing file is only overwritten once
    # the whole CSV has been produced successfully.
    csvfile_in_memory = io.StringIO()
    writer = csv.DictWriter(csvfile_in_memory, fieldnames=fieldnames, dialect=CSV_DIALECT)
    writer.writeheader()
    for d in dicts:
        writer.writerow(d)
    output_csv = csvfile_in_memory.getvalue()

    with open(filename, 'w') as csvfile:
        csvfile.write(output_csv)
    logger.warning('Extended CSV "%s" from %d to %d lines', filename, original_num_dicts, len(dicts))
|
|
|
|
# Default HTTP headers for scraper requests, mimicking a desktop Firefox
# browser so target sites serve their normal HTML pages.
# NOTE(review): not referenced anywhere in this visible chunk — presumably
# consumed by the fetcher modules; confirm before removing.
STANDARD_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0',
    #"Accept": "application/json, text/plain, */*",
    'Accept-Language': 'en-US,en;q=0.5',
    'Accept-Encoding': 'gzip, deflate, br',
}
|
|
|
|
def main():
    """Entry point: load Firefox cookies, run each scraper, persist its rows."""
    cookiejar = browsercookie.firefox()
    logger.warning('Got cookiejar from firefox: %s cookies', len(cookiejar))

    # A plain cached session was used previously; cfscrape handles
    # Cloudflare-protected sites.
    #session = requests_cache.CachedSession('web_cache', cookies = cookiejar)
    session = cfscrape.create_scraper()
    for cookie in cookiejar:
        session.cookies.set_cookie(cookie)

    for scraper in determine_scrapers():
        logger.warning('Running scraper: %s', scraper.dataset_name)
        result_rows = list(scraper.scraper(session))
        extend_csv_file('output/' + scraper.dataset_name, result_rows,
                        deduplicate_mode=scraper.deduplicate_mode)
        logger.warning('Scraper done: %s', scraper.dataset_name)
|
|
|
|
# Script entry point when run directly (not imported).
if __name__ == '__main__':
    main()
|
|
|