diff --git a/git_time_tracker/source/csv_file.py b/git_time_tracker/source/csv_file.py
index 5e26973..3daf3fa 100644
--- a/git_time_tracker/source/csv_file.py
+++ b/git_time_tracker/source/csv_file.py
@@ -11,7 +11,8 @@ logger = getLogger(__name__)
 
 def iterate_samples_from_dicts(rows: list[dict[str, Any]]) -> Iterator[ActivitySample]:
     if len(rows) == 0:
-        raise ValueError("No rows provided")
+        message = 'No rows provided'
+        raise ValueError(message)
 
     if True:
         event_data = rows[len(rows) // 2]  # Hopefully select a useful representative.
@@ -20,7 +21,8 @@ def iterate_samples_from_dicts(rows: list[dict[str, Any]]) -> Iterator[ActivityS
     del event_data
 
     if len(possible_keys.time_start) + len(possible_keys.time_end) < 1:
-        raise ValueError("No time columns found in data")
+        message = 'No time columns found in data'
+        raise ValueError(message)
 
     for event_data in rows:
         """
@@ -48,5 +50,6 @@ def iterate_samples_from_csv_file(file_path: Path) -> Iterator[ActivitySample]:
     dicts = load_csv_file(file_path)
     samples = list(iterate_samples_from_dicts(dicts))
     if len(samples) == 0:
-        raise ValueError('Did not find any samples')
+        message = 'Did not find any samples'
+        raise ValueError(message)
     yield from samples
diff --git a/git_time_tracker/source/git_repo.py b/git_time_tracker/source/git_repo.py
index 5e92afc..4756403 100644
--- a/git_time_tracker/source/git_repo.py
+++ b/git_time_tracker/source/git_repo.py
@@ -29,7 +29,8 @@ def determine_project_name(repo: git.Repo) -> str:
 
 def get_samples_from_project(repo: git.Repo) -> Iterator[ActivitySample]:
     project_name = determine_project_name(repo)
     if project_name is None:
-        raise ValueError("Could not determine project name")
+        message = 'Could not determine project name'
+        raise ValueError(message)
 
     # TODO: Branch on main or master or default
diff --git a/html_data_format/__main__.py b/html_data_format/__main__.py
index 334efc5..4335a82 100644
--- a/html_data_format/__main__.py
+++ b/html_data_format/__main__.py
@@ -29,10 +29,10 @@ def newest_entry(csv_type: str):
         bottle.response.status = 404
         return {'error': 'CSV file is empty or no data found'}
 
-    TIME_COLUMN = 'time.current'
+    time_column = 'time.current'
 
-    if TIME_COLUMN in data[0]:
-        newest = max(data, key=lambda r: r.get(TIME_COLUMN))
+    if time_column in data[0]:
+        newest = max(data, key=lambda r: r.get(time_column))
     else:
         newest = data[-1]
 
diff --git a/obsidian_import/__init__.py b/obsidian_import/__init__.py
index 1d9c990..529ae6b 100644
--- a/obsidian_import/__init__.py
+++ b/obsidian_import/__init__.py
@@ -39,18 +39,20 @@ def to_text_duration(duration: datetime.timedelta) -> str:
     duration -= minutes * MINUTE
     seconds = int(duration / SECOND)
 
-    l = []
+    components = []
     if hours > 0:
-        l.append(f'{hours} hours')
+        components.append(f'{hours} hours')
     if minutes > 0:
-        l.append(f'{minutes} minutes')
+        components.append(f'{minutes} minutes')
     if seconds > 0:
-        l.append(f'{seconds} seconds')
-    return ' '.join(l)
+        components.append(f'{seconds} seconds')
+    return ' '.join(components)
 
 
 def iterate_samples_from_rows(rows: Rows) -> Iterator[ActivitySample]:
-    assert len(rows) > 0
+    if len(rows) == 0:
+        message = 'No rows provided for sample iteration'
+        raise ValueError(message)
 
     if True:
         event_data = rows[len(rows) // 2]  # Hopefully select a useful representative.
@@ -58,8 +60,9 @@ def iterate_samples_from_rows(rows: Rows) -> Iterator[ActivitySample]:
     logger.info('Found possible keys: %s', possible_keys)
     del event_data
 
-    assert len(possible_keys.time_start) + len(possible_keys.time_end) >= 1
-    assert len(possible_keys.image) >= 0
+    if len(possible_keys.time_start) + len(possible_keys.time_end) < 1:
+        message = 'No time start or end keys found in data'
+        raise ValueError(message)
 
     for event_data in rows:
         (start_at, end_at) = start_end(event_data, possible_keys)
@@ -142,10 +145,10 @@ def import_stepmania_steps_csv(vault: ObsidianVault, rows: Rows) -> int:
         rows_per_date[date].append(row)
     del date, row
 
-    COLUMNS = ['score.w1', 'score.w2', 'score.w3', 'score.w4', 'score.w5']
+    columns = ['score.w1', 'score.w2', 'score.w3', 'score.w4', 'score.w5']
 
     def all_steps(row: dict[str, int]):
-        return sum(row[column] for column in COLUMNS)
+        return sum(row[column] for column in columns)
 
     steps_per_date = {
         date: sum(all_steps(row) for row in rows)
diff --git a/personal_data/main.py b/personal_data/main.py
index 1b0fdcf..470be9a 100644
--- a/personal_data/main.py
+++ b/personal_data/main.py
@@ -11,6 +11,8 @@ from . import data, fetchers, notification, util
 
 logger = logging.getLogger(__name__)
 
+MIN_COOKIES_THRESHOLD = 10
+
 try:
     import cloudscraper
 except ImportError:
@@ -63,8 +65,7 @@ def get_session(
             )
         else:
             logger.error('Expected cloudscraper, but not defined!')
-    else:
-        if ignore_cache:
+    elif ignore_cache:
             logger.warning('HTTP cache disabled')
             return requests.Session()
     session = session_class(
@@ -98,13 +99,13 @@ def get_cookiejar(use_cookiejar: bool):
     if use_cookiejar:
         logger.warning('Got cookiejar from firefox')
         cookiejar = browser_cookie3.firefox()
-        if len(cookiejar) > 10:
+        if len(cookiejar) > MIN_COOKIES_THRESHOLD:
             return cookiejar
         browser_cookie3.firefox(
             '/home/jmaa/.cachy/mbui5xg7.default-release/cookies.sqlite',
         )
         logger.warning('Cookiejar has %s cookies', len(cookiejar))
-        if len(cookiejar) > 10:
+        if len(cookiejar) > MIN_COOKIES_THRESHOLD:
             return cookiejar
     logger.warning('No cookiejar is used')
     return []
diff --git a/personal_data/util.py b/personal_data/util.py
index 04072fb..c5399e3 100644
--- a/personal_data/util.py
+++ b/personal_data/util.py
@@ -16,8 +16,7 @@ logger = logging.getLogger(__name__)
 
 def safe_del(d: dict, *keys: str):
     for key in keys:
-        if key in d:
-            del d[key]
+        d.pop(key, None)
 
 
 def equals_without_fields(
@@ -64,7 +63,8 @@ def deduplicate_dicts(
     deduplicate_ignore_columns: list[str],
 ) -> tuple[list[frozendict[str, Any]], list[str]]:
     if not isinstance(deduplicate_ignore_columns, list):
-        raise TypeError(deduplicate_ignore_columns)
+        message = str(deduplicate_ignore_columns)
+        raise TypeError(message)
 
     fieldnames = []
     for d in dicts:
@@ -102,7 +102,8 @@ def normalize_dict(d: dict[str, Any] | frozendict[str, Any]) -> frozendict[str,
     if not isinstance(d, dict) and not isinstance(d, frozendict):
         d = dataclass_to_dict(d)
     if not isinstance(d, (dict, frozendict)):
-        raise TypeError('Expected dict or frozendict')
+        message = 'Expected dict or frozendict'
+        raise TypeError(message)
     safe_values = [
         (k, csv_import.csv_str_to_value(csv_import.csv_safe_value(v)))
         for k, v in d.items()
@@ -119,7 +120,8 @@ def extend_csv_file(
     if deduplicate_ignore_columns == data.Scraper.deduplicate_ignore_columns:
         deduplicate_ignore_columns = []
     if not isinstance(deduplicate_ignore_columns, list):
-        raise TypeError(deduplicate_ignore_columns)
+        message = str(deduplicate_ignore_columns)
+        raise TypeError(message)
 
     try:
         original_dicts = csv_import.load_csv_file(csv_file)