Compare commits
5 Commits
0cc55e44ff
...
2e2a52e33e
Author | SHA1 | Date | |
---|---|---|---|
2e2a52e33e | |||
5c2c843178 | |||
0763e07039 | |||
b83f70c354 | |||
2425a4cf7c |
1
html_data_format/__init__.py
Normal file
1
html_data_format/__init__.py
Normal file
|
@@ -0,0 +1 @@
|
|||
"""# HTML-Data Formatting"""
|
48
html_data_format/__main__.py
Normal file
48
html_data_format/__main__.py
Normal file
|
@@ -0,0 +1,48 @@
|
|||
import datetime
|
||||
import json
|
||||
import logging
|
||||
from pathlib import Path
|
||||
import bottle
|
||||
|
||||
from personal_data import csv_import
|
||||
|
||||
# Module-wide logger; INFO level so request-handling diagnostics are
# visible by default when the dev server runs.
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)

# All datasets served by this app are read from CSV files under this directory.
ROOT_DIRECTORY = Path('output')
||||
@bottle.route('/<csv_type>/newest')
def newest_entry(csv_type: str):
    """Return the newest row of the ``output/<csv_type>.csv`` dataset as JSON.

    The newest entry is the row with the largest ``'time.current'`` value;
    when no row carries that column, the last row in file order is returned.

    Responds 500 when the CSV cannot be loaded and 404 when it holds no data.
    """
    # NOTE: bottle's <csv_type> wildcard does not match '/', which limits
    # traversal through this user-supplied path segment.
    path = ROOT_DIRECTORY / f'{csv_type}.csv'

    bottle.response.content_type = 'application/json'

    try:
        data = csv_import.load_csv_file(path)
    except Exception:
        # Boundary handler: log the full traceback, report a generic error.
        logger.exception('Error loading CSV file at %s', path)
        bottle.response.status = 500
        return {'error': 'Failed to load CSV'}

    if not data:
        bottle.response.status = 404
        return {'error': 'CSV file is empty or no data found'}

    TIME_COLUMN = 'time.current'

    # Compare only rows that actually carry a time value.  The previous
    # key `r.get(TIME_COLUMN)` returned None for rows missing the column,
    # and comparing None against real values raises TypeError.
    timed_rows = [row for row in data if row.get(TIME_COLUMN) is not None]
    if timed_rows:
        newest = max(timed_rows, key=lambda row: row[TIME_COLUMN])
    else:
        # No usable time column anywhere: fall back to file order.
        newest = data[-1]

    # csv_safe_value normalises project-specific cell types into
    # JSON-serialisable values.
    return {
        csv_import.csv_safe_value(k): csv_import.csv_safe_value(v)
        for k, v in newest.items()
    }
||||
def _serve() -> None:
    """Start the development HTTP server on localhost:8080 (debug mode)."""
    bottle.run(host='localhost', port=8080, debug=True)


if __name__ == '__main__':
    _serve()
|
|
@@ -1 +1 @@
|
|||
__version__ = '0.1.68'
|
||||
__version__ = '0.1.69'
|
||||
|
|
|
@@ -1,10 +1,10 @@
|
|||
import dataclasses
|
||||
import datetime
|
||||
import requests
|
||||
import logging
|
||||
from collections.abc import Iterator, Mapping
|
||||
from email.utils import parsedate_to_datetime
|
||||
|
||||
import requests
|
||||
import requests_util
|
||||
|
||||
from personal_data.data import DeduplicateMode, Scraper
|
||||
|
@@ -66,11 +66,13 @@ class WaniKaniLessonsFetcher(Scraper):
|
|||
yield data_item
|
||||
url = json_resp.get('pages', {}).get('next_url')
|
||||
|
||||
|
||||
def date_from_response(response) -> datetime.datetime:
    """Return the timestamp of *response* taken from its HTTP ``Date`` header.

    Falls back to the current UTC time when the header is absent or empty.
    """
    header_value = response.headers.get('Date')
    if not header_value:
        return datetime.datetime.now(datetime.timezone.utc)
    return parsedate_to_datetime(header_value)
|
||||
|
||||
|
||||
@dataclasses.dataclass(frozen=True)
|
||||
class WaniKaniSummaryFetcher(Scraper):
|
||||
dataset_name: str = 'wanikani_summary'
|
||||
|
|
Loading…
Reference in New Issue
Block a user