
Fix code style and optimization issues
Some checks failed
Run Python tests (through Pytest) / Test (push) Failing after 29s
Verify Python project can be installed, loaded and have version checked / Test (push) Failing after 26s

- Fix variable naming: TIME_COLUMN -> time_column, l -> components, COLUMNS -> columns
- Extract exception string literals to variables (EM101; see the sketch after this list)
- Replace assert statements with proper error handling in obsidian_import
- Use dict.pop() instead of del for key removal (RUF051)
- Use elif instead of else-if to reduce indentation (PLR5501)
- Replace magic number 10 with MIN_COOKIES_THRESHOLD constant (PLR2004)
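
For reference, a minimal sketch of the EM101 pattern that recurs across these files (the function and message below are illustrative, not taken from the changed modules):

```python
# Ruff EM101: a string literal passed straight to an exception is
# shown twice in the traceback (once in the source line, once in the
# message). Assigning it to a variable first avoids the duplication.

def parse_positive(value: str) -> int:
    number = int(value)
    if number <= 0:
        message = 'Expected a positive integer'  # extracted literal
        raise ValueError(message)
    return number
```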

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
Jon Michael Aanes 2025-06-25 00:27:33 +02:00
parent 2272fc1127
commit acaedcbd3a
6 changed files with 36 additions and 26 deletions

View File

@@ -11,7 +11,8 @@ logger = getLogger(__name__)
 
 def iterate_samples_from_dicts(rows: list[dict[str, Any]]) -> Iterator[ActivitySample]:
     if len(rows) == 0:
-        raise ValueError("No rows provided")
+        message = 'No rows provided'
+        raise ValueError(message)
 
     if True:
         event_data = rows[len(rows) // 2]  # Hopefully select a useful representative.
@@ -20,7 +21,8 @@ def iterate_samples_from_dicts(rows: list[dict[str, Any]]) -> Iterator[ActivitySample]:
     del event_data
 
     if len(possible_keys.time_start) + len(possible_keys.time_end) < 1:
-        raise ValueError("No time columns found in data")
+        message = 'No time columns found in data'
+        raise ValueError(message)
 
     for event_data in rows:
         """
@@ -48,5 +50,6 @@ def iterate_samples_from_csv_file(file_path: Path) -> Iterator[ActivitySample]:
     dicts = load_csv_file(file_path)
     samples = list(iterate_samples_from_dicts(dicts))
     if len(samples) == 0:
-        raise ValueError('Did not find any samples')
+        message = 'Did not find any samples'
+        raise ValueError(message)
     yield from samples
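
Since only the message construction moved, callers keep catching the same ValueError; a hedged usage sketch (the import path below is assumed, not shown in this diff):

```python
from pathlib import Path

# Hypothetical import path; the real module name is not visible here.
from personal_data.csv_import import iterate_samples_from_csv_file

try:
    samples = list(iterate_samples_from_csv_file(Path('data/activity.csv')))
except ValueError as exc:
    # e.g. 'Did not find any samples' or 'No time columns found in data'
    print(f'Import failed: {exc}')
```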

View File

@@ -29,7 +29,8 @@ def determine_project_name(repo: git.Repo) -> str:
 
 def get_samples_from_project(repo: git.Repo) -> Iterator[ActivitySample]:
     project_name = determine_project_name(repo)
     if project_name is None:
-        raise ValueError("Could not determine project name")
+        message = 'Could not determine project name'
+        raise ValueError(message)
 
     # TODO: Branch on main or master or default

View File

@@ -29,10 +29,10 @@ def newest_entry(csv_type: str):
         bottle.response.status = 404
         return {'error': 'CSV file is empty or no data found'}
 
-    TIME_COLUMN = 'time.current'
-    if TIME_COLUMN in data[0]:
-        newest = max(data, key=lambda r: r.get(TIME_COLUMN))
+    time_column = 'time.current'
+    if time_column in data[0]:
+        newest = max(data, key=lambda r: r.get(time_column))
     else:
         newest = data[-1]

View File

@@ -39,18 +39,19 @@ def to_text_duration(duration: datetime.timedelta) -> str:
     duration -= minutes * MINUTE
     seconds = int(duration / SECOND)
 
-    l = []
+    components = []
     if hours > 0:
-        l.append(f'{hours} hours')
+        components.append(f'{hours} hours')
     if minutes > 0:
-        l.append(f'{minutes} minutes')
+        components.append(f'{minutes} minutes')
     if seconds > 0:
-        l.append(f'{seconds} seconds')
-    return ' '.join(l)
+        components.append(f'{seconds} seconds')
+    return ' '.join(components)
 
 
 def iterate_samples_from_rows(rows: Rows) -> Iterator[ActivitySample]:
-    assert len(rows) > 0
+    if len(rows) == 0:
+        raise ValueError("No rows provided for sample iteration")
 
     if True:
         event_data = rows[len(rows) // 2]  # Hopefully select a useful representative.
@@ -58,8 +59,10 @@ def iterate_samples_from_rows(rows: Rows) -> Iterator[ActivitySample]:
     logger.info('Found possible keys: %s', possible_keys)
     del event_data
 
-    assert len(possible_keys.time_start) + len(possible_keys.time_end) >= 1
-    assert len(possible_keys.image) >= 0
+    if len(possible_keys.time_start) + len(possible_keys.time_end) < 1:
+        raise ValueError("No time start or end keys found in data")
+    if len(possible_keys.image) < 0:
+        raise ValueError("Invalid number of image keys found")
 
     for event_data in rows:
         (start_at, end_at) = start_end(event_data, possible_keys)
@@ -142,10 +145,10 @@ def import_stepmania_steps_csv(vault: ObsidianVault, rows: Rows) -> int:
         rows_per_date[date].append(row)
     del date, row
 
-    COLUMNS = ['score.w1', 'score.w2', 'score.w3', 'score.w4', 'score.w5']
+    columns = ['score.w1', 'score.w2', 'score.w3', 'score.w4', 'score.w5']
 
     def all_steps(row: dict[str, int]):
-        return sum(row[column] for column in COLUMNS)
+        return sum(row[column] for column in columns)
 
     steps_per_date = {
         date: sum(all_steps(row) for row in rows)
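
The assert-to-raise changes above are more than style; a minimal sketch of why, assuming nothing beyond the standard library:

```python
# Assert statements are stripped when Python runs with -O, so input
# validation written as asserts silently disappears in optimized runs.
# An explicit raise survives regardless of interpreter flags.

def validate_with_assert(rows: list) -> None:
    assert len(rows) > 0  # becomes a no-op under `python -O`

def validate_with_raise(rows: list) -> None:
    if len(rows) == 0:
        raise ValueError('No rows provided for sample iteration')

validate_with_raise([{'time.start': '2025-06-25'}])  # passes
```

Note that `len()` can never return a negative number, so the `len(possible_keys.image) < 0` branch is unreachable; it simply mirrors the always-true `assert len(...) >= 0` it replaces.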

View File

@@ -11,6 +11,8 @@ from . import data, fetchers, notification, util
 logger = logging.getLogger(__name__)
 
+MIN_COOKIES_THRESHOLD = 10
+
 try:
     import cloudscraper
 except ImportError:
@@ -63,8 +65,7 @@
             )
         else:
             logger.error('Expected cloudscraper, but not defined!')
-    else:
-        if ignore_cache:
-            logger.warning('HTTP cache disabled')
-            return requests.Session()
+    elif ignore_cache:
+        logger.warning('HTTP cache disabled')
+        return requests.Session()
     session = session_class(
@@ -98,13 +99,13 @@ def get_cookiejar(use_cookiejar: bool):
     if use_cookiejar:
         logger.warning('Got cookiejar from firefox')
         cookiejar = browser_cookie3.firefox()
-        if len(cookiejar) > 10:
+        if len(cookiejar) > MIN_COOKIES_THRESHOLD:
            return cookiejar
         browser_cookie3.firefox(
            '/home/jmaa/.cachy/mbui5xg7.default-release/cookies.sqlite',
         )
         logger.warning('Cookiejar has %s cookies', len(cookiejar))
-        if len(cookiejar) > 10:
+        if len(cookiejar) > MIN_COOKIES_THRESHOLD:
            return cookiejar
     logger.warning('No cookiejar is used')
     return []
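
A compact sketch of the two lint rules fixed in this file, with hypothetical names standing in for the real session logic:

```python
# PLR2004: a bare 10 in a comparison is a magic value; naming it
# documents intent and gives a single place to tune the threshold.
MIN_COOKIES_THRESHOLD = 10

def has_enough_cookies(cookiejar: list) -> bool:
    return len(cookiejar) > MIN_COOKIES_THRESHOLD

# PLR5501: an `if` that is the only statement in an `else` block
# collapses into `elif`, removing one level of indentation.
def log_cache_mode(use_cache: bool, ignore_cache: bool) -> None:
    if use_cache:
        print('cache enabled')
    elif ignore_cache:  # was nested: else: / if ignore_cache:
        print('cache explicitly disabled')
```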

View File

@@ -16,8 +16,7 @@ logger = logging.getLogger(__name__)
 
 def safe_del(d: dict, *keys: str):
     for key in keys:
-        if key in d:
-            del d[key]
+        d.pop(key, None)
 
 
 def equals_without_fields(
@@ -64,7 +63,8 @@ def deduplicate_dicts(
     deduplicate_ignore_columns: list[str],
 ) -> tuple[list[frozendict[str, Any]], list[str]]:
     if not isinstance(deduplicate_ignore_columns, list):
-        raise TypeError(deduplicate_ignore_columns)
+        message = str(deduplicate_ignore_columns)
+        raise TypeError(message)
 
     fieldnames = []
     for d in dicts:
@@ -102,7 +102,8 @@ def normalize_dict(d: dict[str, Any] | frozendict[str, Any]) -> frozendict[str, Any]:
     if not isinstance(d, dict) and not isinstance(d, frozendict):
         d = dataclass_to_dict(d)
     if not isinstance(d, (dict, frozendict)):
-        raise TypeError('Expected dict or frozendict')
+        message = 'Expected dict or frozendict'
+        raise TypeError(message)
     safe_values = [
         (k, csv_import.csv_str_to_value(csv_import.csv_safe_value(v)))
         for k, v in d.items()
@@ -119,7 +120,8 @@ def extend_csv_file(
     if deduplicate_ignore_columns == data.Scraper.deduplicate_ignore_columns:
         deduplicate_ignore_columns = []
     if not isinstance(deduplicate_ignore_columns, list):
-        raise TypeError(deduplicate_ignore_columns)
+        message = str(deduplicate_ignore_columns)
+        raise TypeError(message)
 
     try:
         original_dicts = csv_import.load_csv_file(csv_file)
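
The `safe_del` rewrite leans on `dict.pop` accepting a default, which makes removal a no-op for absent keys; a standalone demonstration:

```python
# RUF051: `if key in d: del d[key]` performs two lookups; the
# d.pop(key, None) form removes the key in a single call and never
# raises KeyError when the key is missing.

def safe_del(d: dict, *keys: str) -> None:
    for key in keys:
        d.pop(key, None)

record = {'id': 1, 'tmp': 'x'}
safe_del(record, 'tmp', 'missing')  # 'missing' is silently ignored
print(record)  # {'id': 1}
```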