# personal-data/personal_data/__main__.py
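"""Entry point for the personal_data package.

Runs the enabled scrapers against a session preloaded with the user's
browser cookies and appends each scraper's rows to a CSV file under
output/.
"""
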
import csv
import datetime
import io
import logging

import browsercookie
import cfscrape
import requests
import requests_cache
from frozendict import frozendict

import personal_data.fetchers.crunchyroll
import personal_data.fetchers.playstation
import personal_data.fetchers.psnprofiles

logger = logging.getLogger(__name__)


def determine_scrapers():
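    """Return the list of scrapers that are currently enabled."""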
    scrapers = []
    #scrapers += personal_data.fetchers.playstation.SCRAPERS
    #scrapers += personal_data.fetchers.crunchyroll.SCRAPERS
    scrapers += personal_data.fetchers.psnprofiles.SCRAPERS
    return scrapers


def extend_csv_file(filename, new_dicts, deduplicate=False):
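    """Append new_dicts to the CSV file at filename, optionally deduplicating.

    The header is the union of the keys seen across all rows, so rows from
    different runs may contribute different columns.
    """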
    dicts = []
    try:
        with open(filename, 'r') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                dicts.append(frozendict(row))
        del csvfile
    except FileNotFoundError:
        logger.info('Creating file: %s', filename)

    original_num_dicts = len(dicts)
    dicts += [frozendict(d) for d in new_dicts]
    del new_dicts

    # Use the union of all keys as the header, preserving first-seen order.
    fieldnames = []
    for d in dicts:
        for k in d.keys():
            if k not in fieldnames:
                fieldnames.append(k)

    if deduplicate:
        # frozendict rows are hashable, so set() drops exact duplicates; sort
        # by the first field for a stable order, treating a missing field as ''.
        dicts = sorted(set(dicts), key=lambda d: d.get(fieldnames[0], ''))

    # Render the CSV in memory first, so the file is only truncated once the
    # whole document has been produced.
    csvfile_in_memory = io.StringIO()
    writer = csv.DictWriter(csvfile_in_memory, fieldnames=fieldnames)
    writer.writeheader()
    for d in dicts:
        writer.writerow(d)
    output_csv = csvfile_in_memory.getvalue()
    del writer, csvfile_in_memory

    with open(filename, 'w') as csvfile:
        csvfile.write(output_csv)
    del csvfile

    logger.warning('Extended CSV "%s" from %d to %d lines',
                   filename, original_num_dicts, len(dicts))

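
# Default request headers, mimicking a desktop Firefox browser.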
STANDARD_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0',
    #"Accept": "application/json, text/plain, */*",
    'Accept-Language': 'en-US,en;q=0.5',
    'Accept-Encoding': 'gzip, deflate, br',
}


def main():
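    """Build a scraping session from the user's Firefox cookies, run every
    enabled scraper, and append its rows to output/<dataset_name>.
    """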
    # Reuse the cookies from the user's Firefox profile, so scrapers see the
    # same logged-in sessions as the browser.
    cookiejar = browsercookie.firefox()
    logger.warning('Got cookiejar from firefox: %s cookies', len(cookiejar))

    # cfscrape provides a requests-compatible session that can pass
    # Cloudflare's anti-bot checks.
    #session = requests_cache.CachedSession('web_cache', cookies = cookiejar)
    session = cfscrape.create_scraper()
    for cookie in cookiejar:
        session.cookies.set_cookie(cookie)

    for scraper in determine_scrapers():
        logger.warning('Running scraper: %s', scraper.dataset_name)
        result_rows = list(scraper.scraper(session))
        extend_csv_file('output/' + scraper.dataset_name, result_rows,
                        deduplicate=scraper.deduplicate)
        logger.warning('Scraper done: %s', scraper.dataset_name)


if __name__ == '__main__':
    main()