Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
5 changes: 5 additions & 0 deletions Makefile
Original file line number Diff line number Diff line change
Expand Up @@ -241,6 +241,11 @@ process-erratum:
$(COMPOSE_SUPERVISOR) run --rm \
supervisor python -m supervisor.main $(DEBUG_FLAG) $(IGNORE_NEEDS_ATTENTION_FLAG) $(DRY_RUN_FLAG) process-erratum $(ERRATA_ID)

.PHONY: preliminary-testing
# Evaluate whether the build fixing a Jira issue passed preliminary testing.
# Usage: make preliminary-testing JIRA_ISSUE=RHEL-12345
# Honors DRY_RUN/DEBUG/IGNORE_NEEDS_ATTENTION via the *_FLAG variables.
preliminary-testing:
	$(COMPOSE_SUPERVISOR) run --rm \
		supervisor python -m supervisor.main $(DEBUG_FLAG) $(IGNORE_NEEDS_ATTENTION_FLAG) $(DRY_RUN_FLAG) preliminary-testing $(JIRA_ISSUE)


# Common utility targets

Expand Down
49 changes: 49 additions & 0 deletions README-supervisor.md
Original file line number Diff line number Diff line change
Expand Up @@ -45,6 +45,7 @@ To process a single issue or erratum, you can run:
```
make process-issue JIRA_ISSUE=RHEL-12345
make process-erratum ERRATA_ID=12345
make preliminary-testing JIRA_ISSUE=RHEL-12345
```

This will process the work item in a one-off container,
Expand All @@ -58,6 +59,54 @@ DRY_RUN=true # Don't actually make any changes to issues/errata, just show what
DEBUG=true # Use more detailed logging (shows exactly what would happen for dry run items)
```

## Preliminary Testing Workflow

The `preliminary-testing` command evaluates whether the build fixing a Jira issue
has passed preliminary testing — the gating and CI checks that must pass before
the build can be added to a compose and erratum.

### How it works

The workflow uses an AI agent to analyze test results from two sources:

1. **GreenWave gating status** — If the issue has a `Fixed in Build` NVR, the agent
fetches the HTML page from `https://gating-status.osci.redhat.com/query?nvr=<NVR>`
and checks whether all required gating tests have passed.

2. **OSCI results in MR comments** — The workflow looks up merge/pull requests linked
in the Jira issue's Development section (via the Jira dev-status API), then fetches
comments from those MRs on GitLab. It looks for "Results for pipeline ..." comments
containing OSCI test results.

The workflow works with whatever data is available — it can proceed with only one
source if the other is unavailable (e.g. no build NVR set yet, or no linked MRs).

### Entry conditions

- Issue status must be `In Progress`
- `Preliminary Testing` field must not already be set to `Pass`
- At least one of: `Fixed in Build` NVR or linked pull requests in the Development section

### Outcomes

| Condition | Action |
|-----------|--------|
| All tests passed and Test Coverage is set | Sets `Preliminary Testing = Pass` with a summary comment |
| All tests passed but Test Coverage is not set | Flags for human attention (`jotnar_needs_attention` label) with test results in comment |
| Tests failed | Flags for human attention with failure details |
| Tests still running or pending | Reschedules the work item for later |
| No test results found | Flags for human attention |
| Analysis error | Flags for human attention with error details |

### Usage

```
make preliminary-testing JIRA_ISSUE=RHEL-12345
```

Supports `DRY_RUN=true`, `DEBUG=true`, and `IGNORE_NEEDS_ATTENTION=true` variables
(same as other workflows).

## Testing the collector

You can test the collection process with:
Expand Down
67 changes: 67 additions & 0 deletions supervisor/jira_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -677,6 +677,73 @@ def add_issue_attachments(
add_issue_comment(issue_key, comment, dry_run=dry_run)


def get_issue_pull_requests(issue_key: str) -> list[dict[str, Any]]:
    """
    Get pull/merge requests linked to a JIRA issue via the dev-status API.

    Returns a list of pull request dicts with keys: id, name, status, url,
    source (branch info), destination (branch info), repositoryName, repositoryUrl.
    """
    # The dev-status endpoint is keyed by the numeric issue ID rather than the
    # issue key, so resolve it first (empty fields= keeps the payload minimal).
    numeric_id = jira_api_get(f"issue/{urlquote(issue_key)}?fields=")["id"]

    detail_url = f"{jira_url()}/rest/dev-status/latest/issue/detail"
    query = {
        "issueId": numeric_id,
        "applicationType": "GitLab",
        "dataType": "pullrequest",
    }

    response = requests_session().get(detail_url, headers=jira_headers(), params=query)
    if not response.ok:
        logger.error(
            "GET %s (params=%s) failed.\nerror:\n%s",
            detail_url,
            query,
            response.text,
        )
        raise_for_status(response)

    payload = response.json()
    # Each "detail" entry may carry its own pullRequests list; flatten them all.
    return [
        pr
        for detail in payload.get("detail", [])
        for pr in detail.get("pullRequests", [])
    ]


def set_preliminary_testing(
    issue_key: str,
    value: PreliminaryTesting,
    comment: CommentSpec = None,
    *,
    dry_run: bool = False,
) -> None:
    """Update the Preliminary Testing custom field on a JIRA issue."""
    # Resolve the custom field ID for "Preliminary Testing" by name.
    field_id = get_custom_fields()["Preliminary Testing"]
    path = f"issue/{urlquote(issue_key)}"

    # Assemble the optional comment portion of the edit first, then the body.
    update_section: dict[str, Any] = {}
    if comment is not None:
        _add_comment_update(update_section, comment)

    body: dict[str, Any] = {
        "fields": {field_id: {"value": str(value)}},
        "update": update_section,
    }

    if dry_run:
        logger.info(
            "Dry run: would set Preliminary Testing to %s on issue %s",
            value,
            issue_key,
        )
        logger.debug("Dry run: would put %s to %s", body, path)
        return

    jira_api_put(path, json=body)


def get_issue_attachment(issue_key: str, filename: str) -> bytes:
"""
Retrieve the content of a specific attachment from a JIRA issue.
Expand Down
39 changes: 39 additions & 0 deletions supervisor/main.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,6 +14,7 @@
ErratumHandler,
)
from .issue_handler import IssueHandler
from .preliminary_testing_handler import PreliminaryTestingHandler
from .jira_utils import get_issue
from .http_utils import with_http_sessions
from .work_queue import WorkQueue, WorkItemType, work_queue
Expand Down Expand Up @@ -251,6 +252,44 @@ def process_erratum(id_or_url: str):
asyncio.run(do_process_erratum(id))


@with_http_sessions()
async def do_preliminary_testing(key: str):
    """Run the preliminary-testing workflow for a single JIRA issue key."""
    await init_kerberos_ticket()

    handler = PreliminaryTestingHandler(
        get_issue(key, full=True),
        dry_run=app_state.dry_run,
        ignore_needs_attention=app_state.ignore_needs_attention,
    )
    result = await handler.run()

    # A negative reschedule interval means the work item won't be retried.
    reschedule_display = result.reschedule_in if result.reschedule_in >= 0 else "never"
    logger.info(
        "Issue %s preliminary testing processed, status=%s, reschedule_in=%s",
        key,
        result.status,
        reschedule_display,
    )


@app.command()
def preliminary_testing(
    key_or_url: str,
):
    """CLI entry point: run preliminary testing for an issue key or browse URL."""
    check_env(chat=True, jira=True, gitlab=True)

    if not key_or_url.startswith("http"):
        key = key_or_url
    else:
        # Accept a full browse URL and extract the issue key from it.
        match = re.match(
            r"https://redhat.atlassian.net/browse/([^/?]+)(?:\?.*)?$", key_or_url
        )
        if match is None:
            raise typer.BadParameter(f"Invalid issue URL {key_or_url}")
        key = match.group(1)

    if not key.startswith("RHEL-"):
        raise typer.BadParameter("Issue must be in the RHEL project")

    asyncio.run(do_preliminary_testing(key))


@app.callback()
def main(
debug: bool = typer.Option(False, help="Enable debug mode."),
Expand Down
160 changes: 160 additions & 0 deletions supervisor/preliminary_testing_analyst.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,160 @@
import json
import logging
import os
from datetime import datetime, timezone
from typing import Any

from pydantic import BaseModel, Field

from beeai_framework.agents.tool_calling import ToolCallingAgent
from beeai_framework.agents.types import AgentMeta
from beeai_framework.backend import ChatModel
from beeai_framework.memory import UnconstrainedMemory
from beeai_framework.template import PromptTemplate, PromptTemplateInput

from agents.utils import get_agent_execution_config
from .supervisor_types import FullIssue, TestingState
from .tools.fetch_greenwave import FetchGreenWaveTool
from .tools.fetch_gitlab_mr_notes import FetchGitlabMrNotesTool
from .tools.read_issue import ReadIssueTool

logger = logging.getLogger(__name__)


class InputSchema(BaseModel):
    # Input values substituted into TEMPLATE when rendering the analyst prompt.
    """Prompt input for the preliminary-testing analyst agent."""

    issue: FullIssue = Field(description="Details of JIRA issue to analyze")
    build_nvr: str | None = Field(description="NVR of the build to check, if available")
    jira_pull_requests: str = Field(
        description="Pull/merge requests linked in Jira Development section (JSON)"
    )
    current_time: datetime = Field(description="Current timestamp")


class PreliminaryTestingResult(BaseModel):
    """Structured output of the analyst agent: a testing state plus a comment."""

    state: TestingState = Field(description="State of preliminary testing")
    comment: str | None = Field(
        description="Comment to add to the JIRA issue explaining the result"
    )


# Prompt template rendered by PromptTemplate (Mustache-style: `{{ name }}` is
# variable interpolation). Literal JIRA macros must therefore use single
# braces — `{{panel}}` would be consumed by the template engine instead of
# reaching the model (JIRA's panel macro syntax is `{panel}` anyway).
TEMPLATE = """\
You are the preliminary testing analyst agent for Project Jötnar. Your task is to
analyze a RHEL JIRA issue and determine if the build fixing it has passed preliminary
testing — the gating and CI checks that must pass before the build can be added to a
compose and erratum.

JIRA_ISSUE_DATA: {{ issue }}
BUILD_NVR: {{ build_nvr }}
JIRA_PULL_REQUESTS (from Jira Development section): {{ jira_pull_requests }}
CURRENT_TIME: {{ current_time }}

You have two sources of test results to check. You should attempt to check all
available sources, and make your decision based on whichever results you can obtain.

1. **GreenWave / OSCI Gating Status**: If BUILD_NVR is available (not None), use
   the fetch_greenwave tool with the BUILD_NVR to check the OSCI gating results.
   The HTML page will show which gating test jobs ran and whether they passed or
   failed. All required/gating tests must pass.
   The GreenWave Monitor URL is https://gating-status.osci.redhat.com/query?nvr=BUILD_NVR
   — when linking to gating results in your comment, ONLY use this exact URL pattern.
   Do NOT invent or guess any other URLs for gating results.
   If BUILD_NVR is None, skip this source.

2. **OSCI results in MR comments**: If JIRA_PULL_REQUESTS contains linked merge
   requests (from the Jira Development section), use the fetch_gitlab_mr_notes tool
   to read the comments on those MRs. Look for comments titled "Results for pipeline ..."
   — these contain OSCI test results. Parse these results to determine which tests
   passed and which failed.
   To use fetch_gitlab_mr_notes, extract the project path and MR IID from the
   JIRA_PULL_REQUESTS data. The "id" field has format "project/path!iid" and the
   "url" field contains the full MR URL. The "repositoryUrl" contains the project URL
   from which you can derive the project path (remove the leading https://gitlab.com/).

If a tool call fails or returns an error, note it in your comment but continue
analyzing with the results you were able to obtain. Only return tests-error if
you could not obtain results from ANY source.

Call the final_answer tool passing in the state and a comment as follows.
The comment should use JIRA comment syntax (headings, bullet points, links).
Do NOT wrap your comment in a {panel} macro — that will be added automatically.

If all available gating tests have passed (and MR OSCI results passed, if available):
    state: tests-passed
    comment: [Brief summary of what passed, with links to the GreenWave page and MR
    if available. Note if any source was unavailable.]

If any required/gating tests have failed:
    state: tests-failed
    comment: [List the failed tests with URLs, explain which are from GreenWave and
    which from MR comments]

If tests are still running (pipeline status is running, or GreenWave shows tests in progress):
    state: tests-running
    comment: [Brief description of what is still running]

If tests are queued but not yet started:
    state: tests-pending
    comment: [Brief description]

If no test results can be found from any source:
    state: tests-not-running
    comment: [Explain that no test results were found and manual intervention may be needed]

If all sources returned errors and no results could be obtained:
    state: tests-error
    comment: [Explain which sources were tried and what errors occurred]
"""


def render_prompt(input: InputSchema) -> str:
    """Render the analyst prompt TEMPLATE with the given input values."""
    template = PromptTemplate(
        PromptTemplateInput(schema=InputSchema, template=TEMPLATE)
    )
    return template.render(input)


async def analyze_preliminary_testing(
    jira_issue: FullIssue,
    build_nvr: str | None,
    jira_pull_requests: list[dict[str, Any]] | None = None,
) -> PreliminaryTestingResult:
    """
    Run the preliminary-testing analyst agent on a JIRA issue.

    Args:
        jira_issue: Full issue data to analyze.
        build_nvr: NVR of the fixing build, if set on the issue (may be None).
        jira_pull_requests: Pull request dicts from the Jira dev-status API;
            serialized to JSON for the prompt. None is treated as empty.

    Returns:
        The structured state + comment produced by the agent.

    Raises:
        ValueError: If the agent run finished without producing a result.
    """
    tools = [
        FetchGreenWaveTool(),
        FetchGitlabMrNotesTool(),
        ReadIssueTool(),
    ]

    agent = ToolCallingAgent(
        llm=ChatModel.from_name(
            os.environ["CHAT_MODEL"],
            allow_parallel_tool_calls=True,
        ),
        memory=UnconstrainedMemory(),
        tools=tools,
        meta=AgentMeta(
            name="PreliminaryTestingAnalyst",
            description="Agent that analyzes GreenWave gating and MR comment results to determine preliminary testing status",
            tools=tools,
        ),
    )

    # Named `analysis_input` to avoid shadowing the `input` builtin.
    analysis_input = InputSchema(
        issue=jira_issue,
        build_nvr=build_nvr,
        jira_pull_requests=json.dumps(jira_pull_requests or [], indent=2),
        current_time=datetime.now(timezone.utc),
    )

    response = await agent.run(
        render_prompt(analysis_input),
        expected_output=PreliminaryTestingResult,
        **get_agent_execution_config(),  # type: ignore
    )

    if response.state.result is None:
        raise ValueError("Agent did not return a result")

    output = PreliminaryTestingResult.model_validate_json(response.state.result.text)
    logger.info(
        "Preliminary testing analysis completed: %s", output.model_dump_json(indent=4)
    )
    return output
Loading
Loading