Compare commits

2 Commits

SHA1        Message                                                  Date
f1f1e42e7e  fix: skip evaluation if no evaluator model is specified  2025-04-24 11:59:13 +02:00
2904c5429b  Initial ruff pass                                        2025-04-24 11:58:36 +02:00

All checks on f1f1e42e7e were successful: Run Python tests (through Pytest) / Test (push) succeeded in 24s; Verify Python project can be installed, loaded and have version checked / Test (push) succeeded in 22s.
2 changed files with 4 additions and 21 deletions

@@ -387,9 +387,6 @@ def run_ollama_and_get_yes_or_no(cwd, initial_texts: list[str]) -> bool:
 def verify_solution(repository_path: Path, issue_content: str) -> bool:
-    if not EVALUATOR_MODEL:
-        return True
     summary = run_ollama(
         repository_path,
         [
@@ -475,6 +472,10 @@ def solve_issue_in_repository(
     if not resolution.success:
         return resolution
+    if not EVALUATOR_MODEL:
+        logger.info('No evaluator model specified, skipping evaluation')
+        return resolution
     # Verify whether this is a satisfactory solution
     if verify_solution(repository_path, issue_content):
         return resolution
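
Together, these two hunks move the evaluator guard out of verify_solution and up into solve_issue_in_repository, where the skip is now logged instead of silently returning True. Below is a minimal, self-contained sketch of the resulting control flow; the names EVALUATOR_MODEL, logger, verify_solution, and solve_issue_in_repository come from the diff, while Resolution and the placeholder bodies are hypothetical stand-ins for code not shown here.

import logging
from dataclasses import dataclass
from pathlib import Path

logger = logging.getLogger(__name__)
EVALUATOR_MODEL: str | None = None  # module-level setting, as in the diff

@dataclass
class Resolution:  # hypothetical stand-in for the real result type
    success: bool

def verify_solution(repository_path: Path, issue_content: str) -> bool:
    # After this change the function always consults the evaluator;
    # the EVALUATOR_MODEL guard now lives in the caller.
    return True  # placeholder for the run_ollama() evaluation

def solve_issue_in_repository(repository_path: Path, issue_content: str) -> Resolution:
    resolution = Resolution(success=True)  # placeholder for the code-generation step
    if not resolution.success:
        return resolution
    if not EVALUATOR_MODEL:
        logger.info('No evaluator model specified, skipping evaluation')
        return resolution
    # Verify whether this is a satisfactory solution
    if verify_solution(repository_path, issue_content):
        return resolution
    return Resolution(success=False)  # placeholder rejection path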

@@ -45,16 +45,6 @@ def parse_args():
         default=300,
         help='Interval in seconds between checks in daemon mode (default: 300)',
     )
-    parser.add_argument(
-        '--aider-model',
-        help='Model to use for generating code (overrides default)',
-        default=None,
-    )
-    parser.add_argument(
-        '--evaluator-model',
-        help='Model to use for evaluating code (overrides default)',
-        default=None,
-    )
     return parser.parse_args()
@@ -62,14 +52,6 @@ def main():
     logging.basicConfig(level='INFO')
     args = parse_args()
-    # Override default models if provided
-    import aider_gitea as core
-    if args.aider_model:
-        core.CODE_MODEL = args.aider_model
-    if args.evaluator_model:
-        core.EVALUATOR_MODEL = args.evaluator_model
     seen_issues_db = SeenIssuesDB()
     client = GiteaClient(args.gitea_url, secrets.gitea_token())
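
These last two hunks drop the --aider-model and --evaluator-model command-line overrides, leaving CODE_MODEL and EVALUATOR_MODEL as plain module-level settings in aider_gitea. If an embedding script still needs to override them, the same attribute assignment the removed block performed should remain possible; a minimal sketch, assuming the attributes keep these names (the model strings are purely illustrative):

import aider_gitea as core

# Patch the module-level defaults before starting the bot;
# both model names are illustrative placeholders, not real defaults.
core.CODE_MODEL = 'example-code-model'
core.EVALUATOR_MODEL = 'example-evaluator-model'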