diff --git a/Makefile b/Makefile index 4d8b23c8..f83e11aa 100644 --- a/Makefile +++ b/Makefile @@ -241,6 +241,11 @@ process-erratum: $(COMPOSE_SUPERVISOR) run --rm \ supervisor python -m supervisor.main $(DEBUG_FLAG) $(IGNORE_NEEDS_ATTENTION_FLAG) $(DRY_RUN_FLAG) process-erratum $(ERRATA_ID) +.PHONY: preliminary-testing +preliminary-testing: + $(COMPOSE_SUPERVISOR) run --rm \ + supervisor python -m supervisor.main $(DEBUG_FLAG) $(IGNORE_NEEDS_ATTENTION_FLAG) $(DRY_RUN_FLAG) preliminary-testing $(JIRA_ISSUE) + # Common utility targets diff --git a/README-supervisor.md b/README-supervisor.md index 9781234d..bfc7dd69 100644 --- a/README-supervisor.md +++ b/README-supervisor.md @@ -45,6 +45,7 @@ To process a single issue or erratum, you can run: ``` make process-issue JIRA_ISSUE=RHEL-12345 make process-erratum ERRATA_ID=12345 +make preliminary-testing JIRA_ISSUE=RHEL-12345 ``` This will process the work item in a one-off container, @@ -58,6 +59,54 @@ DRY_RUN=true # Don't actually make any changes to issues/errata, just show what DEBUG=true # Use more detailed logging (shows exactly what would happen for dry run items) ``` +## Preliminary Testing Workflow + +The `preliminary-testing` command evaluates whether the build fixing a Jira issue +has passed preliminary testing — the gating and CI checks that must pass before +the build can be added to a compose and erratum. + +### How it works + +The workflow uses an AI agent to analyze test results from two sources: + +1. **GreenWave gating status** — If the issue has a `Fixed in Build` NVR, the agent + fetches the HTML page from `https://gating-status.osci.redhat.com/query?nvr=` + and checks whether all required gating tests have passed. + +2. **OSCI results in MR comments** — The workflow looks up merge/pull requests linked + in the Jira issue's Development section (via the Jira dev-status API), then fetches + comments from those MRs on GitLab. It looks for "Results for pipeline ..." comments + containing OSCI test results. 
def get_issue_pull_requests(issue_key: str) -> list[dict[str, Any]]:
    """
    Get pull/merge requests linked to a JIRA issue via the dev-status API.

    The dev-status endpoint is keyed by the numeric issue ID, so the issue
    key is first resolved through the regular issue endpoint (requesting no
    fields to keep that response small).

    :param issue_key: JIRA issue key, e.g. "RHEL-12345"
    :return: list of pull request dicts with keys: id, name, status, url,
        source (branch info), destination (branch info), repositoryName,
        repositoryUrl.
    """
    # Resolve the issue key to the numeric ID required by dev-status.
    issue_id = jira_api_get(f"issue/{urlquote(issue_key)}?fields=")["id"]

    url = f"{jira_url()}/rest/dev-status/latest/issue/detail"
    params = {
        "issueId": issue_id,
        "applicationType": "GitLab",
        "dataType": "pullrequest",
    }

    response = requests_session().get(url, headers=jira_headers(), params=params)
    if not response.ok:
        logger.error(
            "GET %s (params=%s) failed.\nerror:\n%s",
            url,
            params,
            response.text,
        )
        raise_for_status(response)

    # Each "detail" entry may carry its own list of pull requests; flatten
    # them into a single list.
    payload = response.json()
    return [
        pull_request
        for detail in payload.get("detail", [])
        for pull_request in detail.get("pullRequests", [])
    ]


def set_preliminary_testing(
    issue_key: str,
    value: PreliminaryTesting,
    comment: CommentSpec = None,
    *,
    dry_run: bool = False,
) -> None:
    """
    Update the Preliminary Testing custom field on a JIRA issue.

    :param issue_key: JIRA issue key
    :param value: new value for the Preliminary Testing field
    :param comment: optional comment added in the same update request
    :param dry_run: if True, only log what would be done
    """
    field_id = get_custom_fields()["Preliminary Testing"]

    path = f"issue/{urlquote(issue_key)}"
    body: dict[str, Any] = {
        "fields": {field_id: {"value": str(value)}},
        "update": {},
    }
    if comment is not None:
        _add_comment_update(body["update"], comment)

    if dry_run:
        # Build the body first so the debug log shows the exact payload
        # that a real run would send.
        logger.info(
            "Dry run: would set Preliminary Testing to %s on issue %s",
            value,
            issue_key,
        )
        logger.debug("Dry run: would put %s to %s", body, path)
        return

    jira_api_put(path, json=body)
from .preliminary_testing_handler import PreliminaryTestingHandler


@with_http_sessions()
async def do_preliminary_testing(key: str):
    """Run the preliminary-testing workflow for a single JIRA issue."""
    await init_kerberos_ticket()

    handler = PreliminaryTestingHandler(
        get_issue(key, full=True),
        dry_run=app_state.dry_run,
        ignore_needs_attention=app_state.ignore_needs_attention,
    )
    result = await handler.run()

    # reschedule_in < 0 means the work item is done for good.
    reschedule = result.reschedule_in if result.reschedule_in >= 0 else "never"
    logger.info(
        "Issue %s preliminary testing processed, status=%s, reschedule_in=%s",
        key,
        result.status,
        reschedule,
    )


@app.command()
def preliminary_testing(
    key_or_url: str,
):
    """CLI entry point: evaluate preliminary testing for one issue.

    Accepts either a bare issue key (RHEL-12345) or a browse URL.
    """
    check_env(chat=True, jira=True, gitlab=True)

    key = key_or_url
    if key_or_url.startswith("http"):
        match = re.match(
            r"https://redhat.atlassian.net/browse/([^/?]+)(?:\?.*)?$", key_or_url
        )
        if match is None:
            raise typer.BadParameter(f"Invalid issue URL {key_or_url}")
        key = match.group(1)

    if not key.startswith("RHEL-"):
        raise typer.BadParameter("Issue must be in the RHEL project")

    asyncio.run(do_preliminary_testing(key))
import json
import logging
import os
from datetime import datetime, timezone
from typing import Any

from pydantic import BaseModel, Field

from beeai_framework.agents.tool_calling import ToolCallingAgent
from beeai_framework.agents.types import AgentMeta
from beeai_framework.backend import ChatModel
from beeai_framework.memory import UnconstrainedMemory
from beeai_framework.template import PromptTemplate, PromptTemplateInput

from agents.utils import get_agent_execution_config
from .supervisor_types import FullIssue, TestingState
from .tools.fetch_greenwave import FetchGreenWaveTool
from .tools.fetch_gitlab_mr_notes import FetchGitlabMrNotesTool
from .tools.read_issue import ReadIssueTool

logger = logging.getLogger(__name__)


class InputSchema(BaseModel):
    """Prompt variables for the preliminary testing analyst agent."""

    issue: FullIssue = Field(description="Details of JIRA issue to analyze")
    build_nvr: str | None = Field(description="NVR of the build to check, if available")
    jira_pull_requests: str = Field(
        description="Pull/merge requests linked in Jira Development section (JSON)"
    )
    current_time: datetime = Field(description="Current timestamp")


class PreliminaryTestingResult(BaseModel):
    """Structured answer the agent must produce."""

    state: TestingState = Field(description="State of preliminary testing")
    comment: str | None = Field(
        description="Comment to add to the JIRA issue explaining the result"
    )


# NOTE: this template is rendered with PromptTemplate, whose {{ ... }} syntax
# marks substitution variables (see {{ issue }} below).  Literal double-brace
# text must therefore never appear in the template body: the previous wording
# "a {{panel}} macro" would have been parsed as an undefined template variable
# and corrupted the rendered prompt.  The instruction now names the macro
# without braces.
TEMPLATE = """\
You are the preliminary testing analyst agent for Project Jötnar. Your task is to
analyze a RHEL JIRA issue and determine if the build fixing it has passed preliminary
testing — the gating and CI checks that must pass before the build can be added to a
compose and erratum.

JIRA_ISSUE_DATA: {{ issue }}
BUILD_NVR: {{ build_nvr }}
JIRA_PULL_REQUESTS (from Jira Development section): {{ jira_pull_requests }}
CURRENT_TIME: {{ current_time }}

You have two sources of test results to check. You should attempt to check all
available sources, and make your decision based on whichever results you can obtain.

1. **GreenWave / OSCI Gating Status**: If BUILD_NVR is available (not None), use
   the fetch_greenwave tool with the BUILD_NVR to check the OSCI gating results.
   The HTML page will show which gating test jobs ran and whether they passed or
   failed. All required/gating tests must pass.
   The GreenWave Monitor URL is https://gating-status.osci.redhat.com/query?nvr=BUILD_NVR
   — when linking to gating results in your comment, ONLY use this exact URL pattern.
   Do NOT invent or guess any other URLs for gating results.
   If BUILD_NVR is None, skip this source.

2. **OSCI results in MR comments**: If JIRA_PULL_REQUESTS contains linked merge
   requests (from the Jira Development section), use the fetch_gitlab_mr_notes tool
   to read the comments on those MRs. Look for comments titled "Results for pipeline ..."
   — these contain OSCI test results. Parse these results to determine which tests
   passed and which failed.
   To use fetch_gitlab_mr_notes, extract the project path and MR IID from the
   JIRA_PULL_REQUESTS data. The "id" field has format "project/path!iid" and the
   "url" field contains the full MR URL. The "repositoryUrl" contains the project URL
   from which you can derive the project path (remove the leading https://gitlab.com/).

If a tool call fails or returns an error, note it in your comment but continue
analyzing with the results you were able to obtain. Only return tests-error if
you could not obtain results from ANY source.

Call the final_answer tool passing in the state and a comment as follows.
The comment should use JIRA comment syntax (headings, bullet points, links).
Do NOT wrap your comment in a panel macro — that will be added automatically.

If all available gating tests have passed (and MR OSCI results passed, if available):
  state: tests-passed
  comment: [Brief summary of what passed, with links to the GreenWave page and MR
  if available. Note if any source was unavailable.]

If any required/gating tests have failed:
  state: tests-failed
  comment: [List the failed tests with URLs, explain which are from GreenWave and
  which from MR comments]

If tests are still running (pipeline status is running, or GreenWave shows tests in progress):
  state: tests-running
  comment: [Brief description of what is still running]

If tests are queued but not yet started:
  state: tests-pending
  comment: [Brief description]

If no test results can be found from any source:
  state: tests-not-running
  comment: [Explain that no test results were found and manual intervention may be needed]

If all sources returned errors and no results could be obtained:
  state: tests-error
  comment: [Explain which sources were tried and what errors occurred]
"""


def render_prompt(input: InputSchema) -> str:
    """Render the analyst prompt from the given input variables."""
    return PromptTemplate(
        PromptTemplateInput(schema=InputSchema, template=TEMPLATE)
    ).render(input)


async def analyze_preliminary_testing(
    jira_issue: FullIssue,
    build_nvr: str | None,
    jira_pull_requests: list[dict[str, Any]] | None = None,
) -> PreliminaryTestingResult:
    """
    Run the AI agent that evaluates preliminary testing for an issue.

    :param jira_issue: full issue data to analyze
    :param build_nvr: NVR of the fixing build, or None if not set yet
    :param jira_pull_requests: pull requests from the Jira dev-status API,
        or None/empty when no MRs are linked
    :return: the agent's structured verdict (state + explanatory comment)
    :raises ValueError: if the agent produced no final result
    """
    tools = [
        FetchGreenWaveTool(),
        FetchGitlabMrNotesTool(),
        ReadIssueTool(),
    ]

    agent = ToolCallingAgent(
        llm=ChatModel.from_name(
            os.environ["CHAT_MODEL"],
            allow_parallel_tool_calls=True,
        ),
        memory=UnconstrainedMemory(),
        tools=tools,
        meta=AgentMeta(
            name="PreliminaryTestingAnalyst",
            description="Agent that analyzes GreenWave gating and MR comment results to determine preliminary testing status",
            tools=tools,
        ),
    )

    # Renamed from "input" to avoid shadowing the builtin.
    agent_input = InputSchema(
        issue=jira_issue,
        build_nvr=build_nvr,
        jira_pull_requests=json.dumps(jira_pull_requests or [], indent=2),
        current_time=datetime.now(timezone.utc),
    )

    response = await agent.run(
        render_prompt(agent_input),
        expected_output=PreliminaryTestingResult,
        **get_agent_execution_config(),  # type: ignore
    )

    if response.state.result is None:
        raise ValueError("Agent did not return a result")

    output = PreliminaryTestingResult.model_validate_json(response.state.result.text)
    logger.info(
        "Preliminary testing analysis completed: %s", output.model_dump_json(indent=4)
    )
    return output
result") + + output = PreliminaryTestingResult.model_validate_json(response.state.result.text) + logger.info( + "Preliminary testing analysis completed: %s", output.model_dump_json(indent=4) + ) + return output diff --git a/supervisor/preliminary_testing_handler.py b/supervisor/preliminary_testing_handler.py new file mode 100644 index 00000000..ba995df9 --- /dev/null +++ b/supervisor/preliminary_testing_handler.py @@ -0,0 +1,193 @@ +import logging +from typing import Any + +from common.constants import JiraLabels + +from .work_item_handler import WorkItemHandler +from .jira_utils import ( + add_issue_label, + format_attention_message, + get_issue_pull_requests, + set_preliminary_testing, +) +from .supervisor_types import ( + FullIssue, + IssueStatus, + PreliminaryTesting, + TestingState, + WorkflowResult, +) +from .preliminary_testing_analyst import analyze_preliminary_testing + +logger = logging.getLogger(__name__) + + +class PreliminaryTestingHandler(WorkItemHandler): + """ + Perform preliminary testing evaluation for a JIRA issue. + + Checks GreenWave gating results and OSCI results posted as MR + comments for the build fixing the issue, and sets the Preliminary + Testing field to Pass if all tests have passed. 
+ """ + + def __init__( + self, issue: FullIssue, *, dry_run: bool, ignore_needs_attention: bool + ): + super().__init__(dry_run=dry_run, ignore_needs_attention=ignore_needs_attention) + self.issue = issue + + def resolve_flag_attention(self, why: str, *, details_comment: str | None = None): + if details_comment: + full_comment = f"{format_attention_message(why)}\n\n{details_comment}" + else: + full_comment = format_attention_message(why) + + add_issue_label( + self.issue.key, + JiraLabels.NEEDS_ATTENTION.value, + full_comment, + dry_run=self.dry_run, + ) + + return WorkflowResult(status=why, reschedule_in=-1) + + def resolve_set_preliminary_testing_pass(self, comment: str) -> WorkflowResult: + set_preliminary_testing( + self.issue.key, + PreliminaryTesting.PASS, + comment, + dry_run=self.dry_run, + ) + + return WorkflowResult( + status="Preliminary testing passed", reschedule_in=-1 + ) + + def find_pull_requests(self) -> list[dict[str, Any]]: + """Find merge/pull requests linked to this issue via Jira dev-status API.""" + try: + return get_issue_pull_requests(self.issue.key) + except Exception as e: + logger.warning( + "Failed to get pull requests from Jira dev-status for %s: %s", + self.issue.key, + e, + ) + return [] + + async def run(self) -> WorkflowResult: + issue = self.issue + + logger.info( + "Running preliminary testing workflow for issue %s", issue.url + ) + + # Check for needs_attention label + if ( + JiraLabels.NEEDS_ATTENTION.value in issue.labels + and not self.ignore_needs_attention + ): + return self.resolve_remove_work_item( + "Issue has the jotnar_needs_attention label" + ) + + # Validate single component + if len(issue.components) != 1: + return self.resolve_flag_attention( + "This issue has multiple components. " + "Jotnar only handles issues with single component currently." 
+ ) + + # Check entry conditions + if issue.status != IssueStatus.IN_PROGRESS: + return self.resolve_remove_work_item( + f"Issue status is {issue.status}, expected In Progress" + ) + + if issue.preliminary_testing == PreliminaryTesting.PASS: + return self.resolve_remove_work_item( + "Preliminary Testing is already set to Pass" + ) + + # Check if Test Coverage is filled + test_coverage_missing = not issue.test_coverage + + build_nvr = issue.fixed_in_build + + # Find pull requests linked in Jira Development section + pull_requests = self.find_pull_requests() + if pull_requests: + logger.info( + "Found %d pull request(s) via Jira dev-status for %s", + len(pull_requests), + issue.key, + ) + else: + logger.warning( + "No pull requests found for %s", issue.key, + ) + + # We need at least a build NVR or linked PRs to proceed + if build_nvr is None and not pull_requests: + return self.resolve_remove_work_item( + "Issue has no Fixed in Build and no linked pull requests" + ) + + if build_nvr is None: + logger.info( + "Fixed in Build not set for %s, will analyze using MR results only", + issue.key, + ) + + # Run the AI analysis with whatever data is available + analysis = await analyze_preliminary_testing( + jira_issue=issue, + build_nvr=build_nvr, + jira_pull_requests=pull_requests, + ) + + match analysis.state: + case TestingState.PASSED: + if test_coverage_missing: + return self.resolve_flag_attention( + "Preliminary tests passed but Test Coverage field is not set", + details_comment=analysis.comment, + ) + return self.resolve_set_preliminary_testing_pass( + analysis.comment + or "Preliminary testing has passed.", + ) + case TestingState.FAILED: + return self.resolve_flag_attention( + "Preliminary testing failed - see details below", + details_comment=analysis.comment, + ) + case TestingState.PENDING: + return self.resolve_wait("Preliminary tests are pending") + case TestingState.RUNNING: + return self.resolve_wait("Preliminary tests are running") + case 
TestingState.NOT_RUNNING: + return self.resolve_flag_attention( + "Preliminary tests are not running - see details below", + details_comment=analysis.comment, + ) + case TestingState.ERROR: + return self.resolve_flag_attention( + "An error occurred during preliminary testing analysis - see details below", + details_comment=analysis.comment, + ) + case TestingState.WAIVED: + if test_coverage_missing: + return self.resolve_flag_attention( + "Preliminary tests passed (waived) but Test Coverage field is not set", + details_comment=analysis.comment, + ) + return self.resolve_set_preliminary_testing_pass( + analysis.comment + or "Preliminary testing waived - non-blocking failures detected.", + ) + case _: + raise ValueError( + f"Unknown testing state: {analysis.state}" + ) diff --git a/supervisor/tools/fetch_gitlab_mr_notes.py b/supervisor/tools/fetch_gitlab_mr_notes.py new file mode 100644 index 00000000..6e5e8349 --- /dev/null +++ b/supervisor/tools/fetch_gitlab_mr_notes.py @@ -0,0 +1,97 @@ +import json +import logging +from urllib.parse import quote as urlquote + +from pydantic import BaseModel, Field + +from beeai_framework.context import RunContext +from beeai_framework.emitter import Emitter +from beeai_framework.tools import StringToolOutput, Tool, ToolRunOptions + +from ..gitlab_utils import GITLAB_URL, gitlab_headers +from ..http_utils import aiohttp_session + +logger = logging.getLogger(__name__) + + +class FetchGitlabMrNotesInput(BaseModel): + project: str = Field( + description="GitLab project path (e.g. 'redhat/centos-stream/rpms/podman')" + ) + mr_iid: int = Field(description="Merge request IID within the project") + + +class FetchGitlabMrNotesTool( + Tool[FetchGitlabMrNotesInput, ToolRunOptions, StringToolOutput] +): + """ + Tool to fetch comments/notes from a GitLab merge request. + This is useful for finding OSCI test results posted as comments + on merge requests with titles like "Results for pipeline ...". 
import json
import logging
from urllib.parse import quote as urlquote

from pydantic import BaseModel, Field

from beeai_framework.context import RunContext
from beeai_framework.emitter import Emitter
from beeai_framework.tools import StringToolOutput, Tool, ToolRunOptions

from ..gitlab_utils import GITLAB_URL, gitlab_headers
from ..http_utils import aiohttp_session

logger = logging.getLogger(__name__)


class FetchGitlabMrNotesInput(BaseModel):
    """Input for FetchGitlabMrNotesTool."""

    project: str = Field(
        description="GitLab project path (e.g. 'redhat/centos-stream/rpms/podman')"
    )
    mr_iid: int = Field(description="Merge request IID within the project")


class FetchGitlabMrNotesTool(
    Tool[FetchGitlabMrNotesInput, ToolRunOptions, StringToolOutput]
):
    """
    Tool to fetch comments/notes from a GitLab merge request.

    Useful for finding OSCI test results posted as MR comments with
    titles like "Results for pipeline ...".
    """

    name = "fetch_gitlab_mr_notes"  # type: ignore
    description = (  # type: ignore
        "Fetch comments/notes from a GitLab merge request. "
        "Returns JSON with a list of notes including author, body, and creation date. "
        "Use this to find OSCI test results posted as comments on merge requests."
    )
    input_schema = FetchGitlabMrNotesInput  # type: ignore

    def _create_emitter(self) -> Emitter:
        return Emitter.root().child(
            namespace=["tool", "fetch_gitlab_mr_notes"],
            creator=self,
        )

    async def _run(
        self,
        input: FetchGitlabMrNotesInput,
        options: ToolRunOptions | None,
        context: RunContext,
    ) -> StringToolOutput:
        """Fetch the MR's notes and return them as a JSON string.

        Errors are reported in the tool output rather than raised, so the
        agent can continue with other sources.
        """
        session = aiohttp_session()
        headers = gitlab_headers()
        # Project path must be URL-encoded as a single path segment.
        encoded_project = urlquote(input.project, safe="")

        url = (
            f"{GITLAB_URL}/api/v4/projects/{encoded_project}"
            f"/merge_requests/{input.mr_iid}/notes"
        )
        logger.info("Fetching MR notes from %s", url)

        try:
            # NOTE(review): only the first page (100 notes) is fetched;
            # MRs with more notes would be truncated — confirm acceptable.
            async with session.get(
                url, headers=headers, params={"per_page": "100"}
            ) as response:
                if response.status != 200:
                    text = await response.text()
                    logger.error(
                        "Failed to fetch MR notes (HTTP %d): %s",
                        response.status,
                        text,
                    )
                    return StringToolOutput(
                        result=f"Failed to fetch notes for MR !{input.mr_iid} "
                        f"in {input.project} (HTTP {response.status}): {text}"
                    )

                notes = await response.json()

            # Reduce each note to the fields the agent needs.
            # NOTE(review): assumes every note has author.name — verify
            # against the GitLab notes API response shape.
            simplified = [
                {
                    "author": note["author"]["name"],
                    "body": note["body"],
                    "created_at": note.get("created_at"),
                    "system": note.get("system", False),
                }
                for note in notes
            ]

            return StringToolOutput(result=json.dumps(simplified, indent=2))

        except Exception as e:
            logger.error("Error fetching GitLab MR notes: %s", e)
            return StringToolOutput(
                result=f"Error fetching GitLab MR notes: {e}"
            )
import logging
from urllib.parse import quote as urlquote

from pydantic import BaseModel, Field

from beeai_framework.context import RunContext
from beeai_framework.emitter import Emitter
from beeai_framework.tools import StringToolOutput, Tool, ToolRunOptions

from ..http_utils import aiohttp_session

logger = logging.getLogger(__name__)

# Base URL of the GreenWave Monitor instance queried for gating status.
GREENWAVE_URL = "https://gating-status.osci.redhat.com"


class FetchGreenWaveInput(BaseModel):
    """Input for FetchGreenWaveTool."""

    nvr: str = Field(description="NVR (Name-Version-Release) of the build to check gating status for")


class FetchGreenWaveTool(Tool[FetchGreenWaveInput, ToolRunOptions, StringToolOutput]):
    """
    Tool that fetches the GreenWave Monitor gating status page for a build NVR.

    The page carries the OSCI gating test results that decide whether the
    build can be added to a compose and erratum.
    """

    name = "fetch_greenwave"  # type: ignore
    description = (  # type: ignore
        "Fetch the OSCI gating status page from GreenWave Monitor for a given build NVR. "
        "Returns the HTML content of the gating status page which contains test results "
        "and their pass/fail status. Use this to determine if gating tests have passed."
    )
    input_schema = FetchGreenWaveInput  # type: ignore

    def _create_emitter(self) -> Emitter:
        return Emitter.root().child(
            namespace=["tool", "fetch_greenwave"],
            creator=self,
        )

    async def _run(
        self,
        input: FetchGreenWaveInput,
        options: ToolRunOptions | None,
        context: RunContext,
    ) -> StringToolOutput:
        """Return the gating-status HTML, or an error description.

        Failures are folded into the tool output instead of raised so the
        agent can continue with other sources.
        """
        session = aiohttp_session()

        url = f"{GREENWAVE_URL}/query?nvr={urlquote(input.nvr)}"
        logger.info("Fetching GreenWave gating status from %s", url)

        try:
            async with session.get(url) as response:
                if response.status != 200:
                    text = await response.text()
                    logger.error(
                        "GreenWave request failed with status %d: %s",
                        response.status,
                        text,
                    )
                    return StringToolOutput(
                        result=f"Failed to fetch GreenWave gating status (HTTP {response.status}): {text}"
                    )
                return StringToolOutput(result=await response.text())
        except Exception as e:
            logger.error("Error fetching GreenWave gating status: %s", e)
            return StringToolOutput(
                result=f"Error fetching GreenWave gating status: {e}"
            )