import requests
import requests_cache
import csv
import datetime
import io
import browsercookie
from frozendict import frozendict
import logging

logger = logging.getLogger(__name__)

# Site-specific scraper modules; each exposes a SCRAPERS list.
import playstation
import crunchyroll


def determine_scrapers():
    scrapers = []
    # scrapers += playstation.SCRAPERS
    scrapers += crunchyroll.SCRAPERS
    return scrapers
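

# Each entry in a module's SCRAPERS list is assumed (inferred from main()
# below; the records are not defined in this file) to expose three attributes:
#   .dataset_name  - output CSV filename under output/
#   .scraper       - callable taking a requests session, yielding row dicts
#   .deduplicate   - bool, passed through to extend_csv_file()
# A minimal sketch of such a record, using a hypothetical namedtuple:
#
#     from collections import namedtuple
#     Scraper = namedtuple('Scraper', ['dataset_name', 'scraper', 'deduplicate'])
#     SCRAPERS = [Scraper('example.csv', scrape_example, True)]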


def extend_csv_file(filename, new_dicts, deduplicate=False):
    dicts = []
    try:
        with open(filename, 'r') as csvfile:
            reader = csv.DictReader(csvfile)
            for row in reader:
                # frozendict rows are hashable, which the set()-based
                # deduplication below requires
                dicts.append(frozendict(row))
        del csvfile
    except FileNotFoundError:
        logger.info('Creating file: %s', filename)

    dicts += [frozendict(d) for d in new_dicts]
    del new_dicts

    # dicts[0] below would raise IndexError on an empty dataset, so bail out early.
    if not dicts:
        logger.info('No rows to write for %s; skipping', filename)
        return

    fieldnames = list(dicts[0].keys())

    if deduplicate:
        dicts = sorted(set(dicts), key=lambda d: d[fieldnames[0]])

    csvfile_in_memory = io.StringIO()
    writer = csv.DictWriter(csvfile_in_memory, fieldnames=fieldnames)
    writer.writeheader()
    for d in dicts:
        writer.writerow(d)
    output_csv = csvfile_in_memory.getvalue()
    del writer, csvfile_in_memory

    # newline='' stops the text layer from rewriting the \r\n line
    # terminators that the csv module emits
    with open(filename, 'w', newline='') as csvfile:
        csvfile.write(output_csv)
    del csvfile
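
# Usage sketch (hypothetical filename and fields, for illustration only):
#
#     extend_csv_file('output/example.csv',
#                     [{'id': '1', 'title': 'Example'}],
#                     deduplicate=True)
#
# Incoming plain dicts are converted to frozendicts internally so that
# deduplication can hash them via set().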


STANDARD_HEADERS = {
    'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:122.0) Gecko/20100101 Firefox/122.0',
    'Accept': 'application/json, text/plain, */*',
    'Accept-Language': 'en-US,en;q=0.5',
    'Accept-Encoding': 'gzip, deflate, br',
}
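
# STANDARD_HEADERS is not referenced in this file; presumably the scraper
# modules import it when making their requests. A hedged sketch (placeholder
# URL, not from this project):
#
#     response = session.get('https://example.com/api', headers=STANDARD_HEADERS)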


def main():
    cookiejar = browsercookie.firefox()
    logger.warning('Got cookiejar from firefox: %s cookies', len(cookiejar))

    # requests sessions do not take a cookies= constructor argument, so the
    # browser cookies are attached to the cached session one by one instead.
    session = requests_cache.CachedSession('web_cache')
    for cookie in cookiejar:
        session.cookies.set_cookie(cookie)

    for scraper in determine_scrapers():
        logger.warning('Running scraper: %s', scraper.dataset_name)
        result_rows = list(scraper.scraper(session))
        extend_csv_file('output/' + scraper.dataset_name, result_rows,
                        deduplicate=scraper.deduplicate)


if __name__ == '__main__':
    main()