diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml
index 7b988a2c3..9004e1667 100644
--- a/.github/workflows/ci.yml
+++ b/.github/workflows/ci.yml
@@ -38,6 +38,8 @@ jobs:
options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5
steps:
- uses: actions/checkout@v6
+ with:
+ fetch-depth: 0
- name: Cache deps
uses: actions/cache@v5
with:
diff --git a/.gitignore b/.gitignore
index 3ab2b5e8c..45a99d0d2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -94,6 +94,8 @@ erl_crash.dump
# variables.
/config/*secrets.exs
+/config/cadet.exs
+
# Uploaded file
/cs1101s
/uploads
diff --git a/config/config.exs b/config/config.exs
index 5799f7060..86a789c38 100644
--- a/config/config.exs
+++ b/config/config.exs
@@ -63,10 +63,10 @@ config :ex_aws,
config :ex_aws, :hackney_opts, recv_timeout: 660_000
-# Configure Arc File Upload
-config :arc, virtual_host: true
+# Configure Waffle File Upload
+config :waffle, virtual_host: true
# Or uncomment below to use local storage
-# config :arc, storage: Arc.Storage.Local
+# config :waffle, storage: Waffle.Storage.Local
# Configures Sentry
config :sentry,
diff --git a/config/runtime.exs b/config/runtime.exs
new file mode 100644
index 000000000..49082da62
--- /dev/null
+++ b/config/runtime.exs
@@ -0,0 +1,9 @@
+import Config
+
+# This file is executed after the code compilation on all environments.
+# It contains runtime configuration that's evaluated when the system starts.
+
+# Configure the port from environment variable if set
+if port = System.get_env("PORT") do
+ config :cadet, CadetWeb.Endpoint, http: [:inet6, port: String.to_integer(port)]
+end
diff --git a/config/test.exs b/config/test.exs
index 7e9aa8f86..04ef581a1 100644
--- a/config/test.exs
+++ b/config/test.exs
@@ -90,7 +90,7 @@ config :cadet,
client_role_arn: "test"
]
-config :arc, storage: Arc.Storage.Local
+config :waffle, storage: Waffle.Storage.Local
if "test.secrets.exs" |> Path.expand(__DIR__) |> File.exists?(),
do: import_config("test.secrets.exs")
diff --git a/lib/cadet.ex b/lib/cadet.ex
index cc451955a..b797511e5 100644
--- a/lib/cadet.ex
+++ b/lib/cadet.ex
@@ -40,8 +40,8 @@ defmodule Cadet do
def remote_assets do
quote do
- use Arc.Definition
- use Arc.Ecto.Definition
+ use Waffle.Definition
+ use Waffle.Ecto.Definition
end
end
end
diff --git a/lib/cadet/ai_comments.ex b/lib/cadet/ai_comments.ex
index e37133b98..ce8979e42 100644
--- a/lib/cadet/ai_comments.ex
+++ b/lib/cadet/ai_comments.ex
@@ -5,7 +5,7 @@ defmodule Cadet.AIComments do
import Ecto.Query
alias Cadet.Repo
- alias Cadet.AIComments.AIComment
+ alias Cadet.AIComments.{AIComment, AICommentVersion}
@doc """
Creates a new AI comment log entry.
@@ -41,37 +41,109 @@ defmodule Cadet.AIComments do
end
@doc """
- Updates the final comment for a specific submission and question.
- Returns the most recent comment entry for that submission/question.
+ Updates an existing AI comment with new attributes.
"""
- def update_final_comment(answer_id, final_comment) do
- comment = get_latest_ai_comment(answer_id)
-
- case comment do
- nil ->
+ def update_ai_comment(id, attrs) do
+ id
+ |> get_ai_comment()
+ |> case do
+ {:error, :not_found} ->
{:error, :not_found}
- _ ->
+ {:ok, comment} ->
comment
- |> AIComment.changeset(%{final_comment: final_comment})
+ |> AIComment.changeset(attrs)
|> Repo.update()
end
end
@doc """
- Updates an existing AI comment with new attributes.
+ Saves selected comment indices and finalization metadata for an AI comment.
"""
- def update_ai_comment(id, attrs) do
- id
- |> get_ai_comment()
- |> case do
- {:error, :not_found} ->
+ def save_selected_comments(answer_id, selected_indices, finalized_by_id) do
+ case get_latest_ai_comment(answer_id) do
+ nil ->
{:error, :not_found}
- {:ok, comment} ->
+ comment ->
comment
- |> AIComment.changeset(attrs)
+ |> AIComment.changeset(%{
+ selected_indices: selected_indices,
+ finalized_by_id: finalized_by_id,
+ finalized_at: DateTime.truncate(DateTime.utc_now(), :second)
+ })
|> Repo.update()
end
end
+
+ @doc """
+ Creates a new version entry for a specific comment index.
+ Automatically determines the next version number.
+ """
+ def create_comment_version(ai_comment_id, comment_index, content, editor_id) do
+ transaction_result =
+ Repo.transaction(fn ->
+ # Serialize version creation per (ai_comment_id, comment_index)
+ # to avoid duplicate version numbers.
+ case Repo.query(
+ "SELECT pg_advisory_xact_lock((($1::bigint << 32) | $2::bigint)::bigint)",
+ [ai_comment_id, comment_index]
+ ) do
+ {:ok, _} ->
+ next_version =
+ Repo.one(
+ from(v in AICommentVersion,
+ where: v.ai_comment_id == ^ai_comment_id and v.comment_index == ^comment_index,
+ select: coalesce(max(v.version_number), 0)
+ )
+ ) + 1
+
+ case %AICommentVersion{}
+ |> AICommentVersion.changeset(%{
+ ai_comment_id: ai_comment_id,
+ comment_index: comment_index,
+ version_number: next_version,
+ content: content,
+ editor_id: editor_id
+ })
+ |> Repo.insert() do
+ {:ok, version} -> version
+ {:error, changeset} -> Repo.rollback(changeset)
+ end
+
+ {:error, error} ->
+ Repo.rollback(error)
+ end
+ end)
+
+ case transaction_result do
+ {:ok, version} -> {:ok, version}
+ {:error, reason} -> {:error, reason}
+ end
+ end
+
+ @doc """
+ Gets all versions for a specific AI comment, ordered by comment_index and version_number.
+ """
+ def get_comment_versions(ai_comment_id) do
+ Repo.all(
+ from(v in AICommentVersion,
+ where: v.ai_comment_id == ^ai_comment_id,
+ order_by: [asc: v.comment_index, asc: v.version_number]
+ )
+ )
+ end
+
+ @doc """
+ Gets the latest version for a specific comment index.
+ """
+ def get_latest_version(ai_comment_id, comment_index) do
+ Repo.one(
+ from(v in AICommentVersion,
+ where: v.ai_comment_id == ^ai_comment_id and v.comment_index == ^comment_index,
+ order_by: [desc: v.version_number],
+ limit: 1
+ )
+ )
+ end
end
diff --git a/lib/cadet/ai_comments/ai_comment.ex b/lib/cadet/ai_comments/ai_comment.ex
index 64d5d4cfe..bcd250378 100644
--- a/lib/cadet/ai_comments/ai_comment.ex
+++ b/lib/cadet/ai_comments/ai_comment.ex
@@ -11,15 +11,19 @@ defmodule Cadet.AIComments.AIComment do
field(:answers_json, :string)
field(:response, :string)
field(:error, :string)
- field(:final_comment, :string)
+ field(:selected_indices, {:array, :integer})
+ field(:finalized_at, :utc_datetime)
belongs_to(:answer, Cadet.Assessments.Answer)
+ belongs_to(:finalized_by, Cadet.Accounts.User, foreign_key: :finalized_by_id)
+
+ has_many(:versions, Cadet.AIComments.AICommentVersion)
timestamps()
end
@required_fields ~w(answer_id raw_prompt answers_json)a
- @optional_fields ~w(response error final_comment)a
+ @optional_fields ~w(response error selected_indices finalized_by_id finalized_at)a
def changeset(ai_comment, attrs) do
ai_comment
diff --git a/lib/cadet/ai_comments/ai_comment_version.ex b/lib/cadet/ai_comments/ai_comment_version.ex
new file mode 100644
index 000000000..2de1b88fa
--- /dev/null
+++ b/lib/cadet/ai_comments/ai_comment_version.ex
@@ -0,0 +1,32 @@
+defmodule Cadet.AIComments.AICommentVersion do
+ @moduledoc """
+ Defines the schema and changeset for AI comment versions.
+ Tracks per-comment edits made by tutors.
+ """
+
+ use Ecto.Schema
+ import Ecto.Changeset
+
+ schema "ai_comment_versions" do
+ field(:comment_index, :integer)
+ field(:version_number, :integer)
+ field(:content, :string)
+
+ belongs_to(:ai_comment, Cadet.AIComments.AIComment)
+ belongs_to(:editor, Cadet.Accounts.User, foreign_key: :editor_id)
+
+ timestamps()
+ end
+
+ @required_fields ~w(ai_comment_id comment_index version_number content)a
+ @optional_fields ~w(editor_id)a
+
+ def changeset(version, attrs) do
+ version
+ |> cast(attrs, @required_fields ++ @optional_fields)
+ |> validate_required(@required_fields)
+ |> foreign_key_constraint(:ai_comment_id)
+ |> foreign_key_constraint(:editor_id)
+ |> unique_constraint([:ai_comment_id, :comment_index, :version_number])
+ end
+end
diff --git a/lib/cadet/assessments/assessment.ex b/lib/cadet/assessments/assessment.ex
index edfce9bb6..c13371127 100644
--- a/lib/cadet/assessments/assessment.ex
+++ b/lib/cadet/assessments/assessment.ex
@@ -4,7 +4,7 @@ defmodule Cadet.Assessments.Assessment do
(mission, sidequest, path, and contest)
"""
use Cadet, :model
- use Arc.Ecto.Schema
+ use Waffle.Ecto.Schema
alias Cadet.Repo
alias Cadet.Assessments.{AssessmentAccess, Question, SubmissionStatus, Upload}
@@ -21,6 +21,7 @@ defmodule Cadet.Assessments.Assessment do
field(:question_count, :integer, virtual: true)
field(:graded_count, :integer, virtual: true)
field(:is_grading_published, :boolean, virtual: true)
+ field(:has_llm_questions, :boolean, virtual: true, default: false)
field(:title, :string)
field(:is_published, :boolean, default: false)
field(:summary_short, :string)
@@ -37,6 +38,12 @@ defmodule Cadet.Assessments.Assessment do
field(:has_token_counter, :boolean, default: false)
field(:has_voting_features, :boolean, default: false)
field(:llm_assessment_prompt, :string, default: nil)
+ field(:llm_input_cost, :decimal)
+ field(:llm_output_cost, :decimal)
+ field(:llm_total_input_tokens, :integer, default: 0)
+ field(:llm_total_output_tokens, :integer, default: 0)
+ field(:llm_total_cached_tokens, :integer, default: 0)
+ field(:llm_total_cost, :decimal, default: Decimal.new("0.0"))
belongs_to(:config, AssessmentConfig)
belongs_to(:course, Course)
@@ -47,7 +54,10 @@ defmodule Cadet.Assessments.Assessment do
@required_fields ~w(title open_at close_at number course_id config_id max_team_size)a
@optional_fields ~w(reading summary_short summary_long
- is_published story cover_picture access password has_token_counter has_voting_features llm_assessment_prompt)a
+ is_published story cover_picture access password has_token_counter
+ has_voting_features llm_assessment_prompt
+ llm_input_cost llm_output_cost llm_total_input_tokens
+ llm_total_output_tokens llm_total_cached_tokens llm_total_cost)a
@optional_file_fields ~w(mission_pdf)a
def changeset(assessment, params) do
@@ -60,12 +70,14 @@ defmodule Cadet.Assessments.Assessment do
|> cast_attachments(params, @optional_file_fields)
|> cast(params, @required_fields ++ @optional_fields)
|> validate_required(@required_fields)
+ # Apply default LLM cost rates when none were supplied
+ |> put_default_costs()
|> add_belongs_to_id_from_model([:config, :course], params)
|> foreign_key_constraint(:config_id)
|> foreign_key_constraint(:course_id)
|> unique_constraint([:number, :course_id])
- |> validate_config_course
- |> validate_open_close_date
+ |> validate_config_course()
+ |> validate_open_close_date()
|> validate_number(:max_team_size, greater_than_or_equal_to: 1)
end
@@ -86,6 +98,20 @@ defmodule Cadet.Assessments.Assessment do
end
end
+ defp put_default_costs(changeset) do
+ changeset
+ |> put_fallback(:llm_input_cost, Decimal.new("3.20"))
+ |> put_fallback(:llm_output_cost, Decimal.new("12.80"))
+ end
+
+ defp put_fallback(changeset, field, default_val) do
+ if get_field(changeset, field) == nil do
+ put_change(changeset, field, default_val)
+ else
+ changeset
+ end
+ end
+
defp validate_open_close_date(changeset) do
validate_change(changeset, :open_at, fn :open_at, open_at ->
if Timex.before?(open_at, get_field(changeset, :close_at)) do
diff --git a/lib/cadet/assessments/assessments.ex b/lib/cadet/assessments/assessments.ex
index 426fc8914..ba8087b0e 100644
--- a/lib/cadet/assessments/assessments.ex
+++ b/lib/cadet/assessments/assessments.ex
@@ -604,7 +604,7 @@ defmodule Cadet.Assessments do
)
end
- defp is_voting_assigned(assessment_ids) do
+ defp voting_assigned?(assessment_ids) do
Logger.debug("Checking if voting is assigned for assessment IDs: #{inspect(assessment_ids)}")
voting_assigned_question_ids =
@@ -639,7 +639,7 @@ defmodule Cadet.Assessments do
is_voting_assigned_map =
assessments
|> Enum.map(& &1.id)
- |> is_voting_assigned()
+ |> voting_assigned?()
Enum.map(assessments, fn a ->
a = Map.put(a, :is_voting_published, Map.get(is_voting_assigned_map, a.id, false))
@@ -910,7 +910,7 @@ defmodule Cadet.Assessments do
)
if is_reassigning_voting do
- if is_voting_published(assessment_id) do
+ if voting_published?(assessment_id) do
Logger.info("Deleting existing submissions for assessment #{assessment_id}")
Submission
@@ -955,7 +955,7 @@ defmodule Cadet.Assessments do
end
end
- defp is_voting_published(assessment_id) do
+ defp voting_published?(assessment_id) do
Logger.info("Checking if voting is published for assessment #{assessment_id}")
voting_assigned_question_ids =
@@ -1451,7 +1451,16 @@ defmodule Cadet.Assessments do
end
end
+ @doc """
+ Deletes all AI comment logs associated with the given list of answer IDs.
+ """
+ def delete_comments_for_answers(answer_ids) when is_list(answer_ids) do
+ query = from(c in "ai_comment_logs", where: c.answer_id in ^answer_ids)
+ Repo.delete_all(query)
+ end
+
@dialyzer {:nowarn_function, unsubmit_submission: 2}
+ @dialyzer {:nowarn_function, perform_unsubmit_transaction: 3}
def unsubmit_submission(
submission_id,
cr = %CourseRegistration{id: course_reg_id, role: role}
@@ -1459,27 +1468,75 @@ defmodule Cadet.Assessments do
when is_ecto_id(submission_id) do
Logger.info("Unsubmitting submission #{submission_id} for user #{course_reg_id}")
+ case validate_unsubmit_submission(submission_id, cr) do
+ {:ok, submission} ->
+ perform_unsubmit_transaction(submission, submission_id, cr)
+
+ {:error, reason} ->
+ {:error, reason}
+ end
+ end
+
+ defp validate_unsubmit_submission(submission_id, cr = %CourseRegistration{role: role}) do
submission =
Submission
|> join(:inner, [s], a in assoc(s, :assessment))
|> preload([_, a], assessment: a)
|> Repo.get(submission_id)
- # allows staff to unsubmit own assessment
- bypass = role in @bypass_closed_roles and submission.student_id == course_reg_id
- Logger.info("Bypass restrictions: #{bypass}")
+ case is_map(submission) do
+ true ->
+ # allows staff to unsubmit own assessment
+ bypass = role in @bypass_closed_roles and submission.student_id == cr.id
+
+ with {:is_open?, true} <- {:is_open?, bypass or open?(submission.assessment)},
+ {:status, :submitted} <- {:status, submission.status},
+ {:allowed_to_unsubmit?, true} <-
+ {:allowed_to_unsubmit?,
+ role == :admin or bypass or is_nil(submission.student_id) or
+ Cadet.Accounts.Query.avenger_of?(cr, submission.student_id)},
+ {:is_grading_published?, false} <-
+ {:is_grading_published?, submission.is_grading_published} do
+ {:ok, submission}
+ else
+ {:is_open?, false} ->
+ Logger.error("Assessment for submission #{submission_id} is not open")
+ {:error, {:forbidden, "Assessment not open"}}
- with {:submission_found?, true} <- {:submission_found?, is_map(submission)},
- {:is_open?, true} <- {:is_open?, bypass or is_open?(submission.assessment)},
- {:status, :submitted} <- {:status, submission.status},
- {:allowed_to_unsubmit?, true} <-
- {:allowed_to_unsubmit?,
- role == :admin or bypass or is_nil(submission.student_id) or
- Cadet.Accounts.Query.avenger_of?(cr, submission.student_id)},
- {:is_grading_published?, false} <-
- {:is_grading_published?, submission.is_grading_published} do
- Logger.info("All checks passed for unsubmitting submission #{submission_id}")
+ {:status, :attempting} ->
+ Logger.error("Submission #{submission_id} is still attempting")
+ {:error, {:bad_request, "Some questions have not been attempted"}}
+
+ {:status, :attempted} ->
+ Logger.error("Submission #{submission_id} has already been attempted")
+ {:error, {:bad_request, "Assessment has not been submitted"}}
+
+ {:allowed_to_unsubmit?, false} ->
+ Logger.error("User #{cr.id} is not allowed to unsubmit submission #{submission_id}")
+ {:error, {:forbidden, "Only Avenger of student or Admin is permitted to unsubmit"}}
+
+ {:is_grading_published?, true} ->
+ Logger.error("Grading for submission #{submission_id} has already been published")
+ {:error, {:forbidden, "Grading has not been unpublished"}}
+
+ _ ->
+ Logger.error(
+ "An unknown error occurred while unsubmitting submission #{submission_id}"
+ )
+
+ {:error, {:internal_server_error, "Please try again later."}}
+ end
+ false ->
+ Logger.error("Submission #{submission_id} not found")
+ {:error, {:not_found, "Submission not found"}}
+ end
+ end
+
+ defp perform_unsubmit_transaction(submission, submission_id, cr) do
+ Logger.info("All checks passed for unsubmitting submission #{submission_id}")
+
+ multi =
Multi.new()
|> Multi.run(
:rollback_submission,
@@ -1490,7 +1547,7 @@ defmodule Cadet.Assessments do
|> Submission.changeset(%{
status: :attempted,
xp_bonus: 0,
- unsubmitted_by_id: course_reg_id,
+ unsubmitted_by_id: cr.id,
unsubmitted_at: Timex.now()
})
|> Repo.update()
@@ -1529,88 +1586,77 @@ defmodule Cadet.Assessments do
end
end)
end)
- |> Repo.transaction()
-
- case submission.student_id do
- # Team submission, handle notifications for team members
- nil ->
- Logger.info("Handling unsubmit notifications for team submission #{submission.id}")
- team = Repo.get(Team, submission.team_id)
-
- query =
- from(t in Team,
- join: tm in TeamMember,
- on: t.id == tm.team_id,
- join: cr in CourseRegistration,
- on: tm.student_id == cr.id,
- where: t.id == ^team.id,
- select: cr.id
- )
-
- team_members = Repo.all(query)
-
- Enum.each(team_members, fn tm_id ->
- Logger.info("Sending unsubmit notification to team member #{tm_id}")
+ |> Multi.run(:delete_ai_comments, fn _repo, _ ->
+ Logger.info("Deleting AI comments for submission #{submission_id}")
- Notifications.handle_unsubmit_notifications(
- submission.assessment.id,
- Repo.get(CourseRegistration, tm_id)
- )
- end)
-
- student_id ->
- Logger.info(
- "Handling unsubmit notifications for individual submission #{submission.id}"
- )
+ answer_ids =
+ Answer
+ |> where(submission_id: ^submission_id)
+ |> select([a], a.id)
+ |> Repo.all()
- Notifications.handle_unsubmit_notifications(
- submission.assessment.id,
- Repo.get(CourseRegistration, student_id)
- )
- end
+ delete_comments_for_answers(answer_ids)
+ {:ok, nil}
+ end)
- Logger.info("Removing grading notifications for submission #{submission.id}")
+ transaction_result = Repo.transaction(multi)
+
+ case transaction_result do
+ {:ok, _result} ->
+ Logger.info("Successfully unsubmitting submission #{submission_id}")
+
+ case submission.student_id do
+ # Team submission, handle notifications for team members
+ nil ->
+ Logger.info("Handling unsubmit notifications for team submission #{submission.id}")
+ team = Repo.get(Team, submission.team_id)
+
+ query =
+ from(t in Team,
+ join: tm in TeamMember,
+ on: t.id == tm.team_id,
+ join: cr in CourseRegistration,
+ on: tm.student_id == cr.id,
+ where: t.id == ^team.id,
+ select: cr.id
+ )
- # Remove grading notifications for submissions
- Notification
- |> where(submission_id: ^submission_id, type: :submitted)
- |> select([n], n.id)
- |> Repo.all()
- |> Notifications.acknowledge(cr)
+ team_members = Repo.all(query)
- Logger.info("Successfully unsubmitting submission #{submission_id}")
- {:ok, nil}
- else
- {:submission_found?, false} ->
- Logger.error("Submission #{submission_id} not found")
- {:error, {:not_found, "Submission not found"}}
+ Enum.each(team_members, fn tm_id ->
+ Logger.info("Sending unsubmit notification to team member #{tm_id}")
- {:is_open?, false} ->
- Logger.error("Assessment for submission #{submission_id} is not open")
- {:error, {:forbidden, "Assessment not open"}}
+ Notifications.handle_unsubmit_notifications(
+ submission.assessment.id,
+ Repo.get(CourseRegistration, tm_id)
+ )
+ end)
- {:status, :attempting} ->
- Logger.error("Submission #{submission_id} is still attempting")
- {:error, {:bad_request, "Some questions have not been attempted"}}
+ student_id ->
+ Logger.info(
+ "Handling unsubmit notifications for individual submission #{submission.id}"
+ )
- {:status, :attempted} ->
- Logger.error("Submission #{submission_id} has already been attempted")
- {:error, {:bad_request, "Assessment has not been submitted"}}
+ Notifications.handle_unsubmit_notifications(
+ submission.assessment.id,
+ Repo.get(CourseRegistration, student_id)
+ )
+ end
- {:allowed_to_unsubmit?, false} ->
- Logger.error(
- "User #{course_reg_id} is not allowed to unsubmit submission #{submission_id}"
- )
+ Logger.info("Removing grading notifications for submission #{submission.id}")
- {:error, {:forbidden, "Only Avenger of student or Admin is permitted to unsubmit"}}
+ # Remove grading notifications for submissions
+ Notification
+ |> where(submission_id: ^submission_id, type: :submitted)
+ |> select([n], n.id)
+ |> Repo.all()
+ |> Notifications.acknowledge(cr)
- {:is_grading_published?, true} ->
- Logger.error("Grading for submission #{submission_id} has already been published")
- {:error, {:forbidden, "Grading has not been unpublished"}}
+ {:ok, nil}
- _ ->
- Logger.error("An unknown error occurred while unsubmitting submission #{submission_id}")
- {:error, {:internal_server_error, "Please try again later."}}
+ {:error, _failed_operation, failed_value, _changes_so_far} ->
+ Logger.error("Failed to unsubmit submission #{submission_id}: #{inspect(failed_value)}")
+ {:error, {:internal_server_error, "Failed to unsubmit submission"}}
end
end
@@ -1647,7 +1693,7 @@ defmodule Cadet.Assessments do
{:status, :submitted} <- {:status, submission.status},
{:is_manually_graded?, true} <-
{:is_manually_graded?, submission.assessment.config.is_manually_graded},
- {:fully_graded?, true} <- {:fully_graded?, is_fully_graded?(submission_id)},
+ {:fully_graded?, true} <- {:fully_graded?, fully_graded?(submission_id)},
{:allowed_to_publish?, true} <-
{:allowed_to_publish?,
role == :admin or bypass or
@@ -3148,7 +3194,7 @@ defmodule Cadet.Assessments do
end
end
- defp is_fully_graded?(submission_id) do
+ defp fully_graded?(submission_id) do
submission =
Submission
|> Repo.get_by(id: submission_id)
@@ -3169,7 +3215,7 @@ defmodule Cadet.Assessments do
question_count == graded_count
end
- def is_fully_autograded?(submission_id) do
+ def fully_autograded?(submission_id) do
submission =
Submission
|> Repo.get_by(id: submission_id)
@@ -3235,7 +3281,7 @@ defmodule Cadet.Assessments do
{:ok, _} <- Repo.update(changeset) do
update_xp_bonus(submission)
- if is_grading_auto_published and is_fully_graded?(submission_id) do
+ if is_grading_auto_published and fully_graded?(submission_id) do
publish_grading(submission_id, cr)
end
@@ -3350,8 +3396,8 @@ defmodule Cadet.Assessments do
end
# Checks if an assessment is open and published.
- @spec is_open?(Assessment.t()) :: boolean()
- def is_open?(%Assessment{open_at: open_at, close_at: close_at, is_published: is_published}) do
+ @spec open?(Assessment.t()) :: boolean()
+ def open?(%Assessment{open_at: open_at, close_at: close_at, is_published: is_published}) do
Timex.between?(Timex.now(), open_at, close_at, inclusive: :start) and is_published
end
@@ -3610,4 +3656,65 @@ defmodule Cadet.Assessments do
Repo.one(query)
end
+
+ def update_llm_usage_and_cost(assessment_id, usage) do
+ prompt = extract_val(usage, "prompt_tokens", :prompt_tokens, 0)
+ completion = extract_val(usage, "completion_tokens", :completion_tokens, 0)
+ details = extract_val(usage, "prompt_tokens_details", :prompt_tokens_details, %{})
+ cached = extract_val(details, "cached_tokens", :cached_tokens, 0)
+
+ # Fetch assessment to get cost rates
+ case Repo.get(Assessment, assessment_id) do
+ nil ->
+ Logger.error("Assessment not found when updating LLM usage and cost: #{assessment_id}")
+ {:error, :not_found}
+
+ assessment ->
+ input_rate = get_valid_rate(assessment.llm_input_cost, "3.20")
+ output_rate = get_valid_rate(assessment.llm_output_cost, "12.80")
+
+ raw_cost = calculate_token_cost(prompt, completion, input_rate, output_rate)
+ new_cost = Decimal.round(raw_cost, 6, :half_up)
+
+ # Atomic database-level updates to prevent race conditions:
+ # all increments happen in one UPDATE statement, avoiding read-modify-write races
+ query =
+ from(a in Assessment,
+ where: a.id == ^assessment_id,
+ update: [
+ set: [
+ llm_total_input_tokens:
+ fragment("COALESCE(llm_total_input_tokens, 0) + ?", ^prompt),
+ llm_total_output_tokens:
+ fragment("COALESCE(llm_total_output_tokens, 0) + ?", ^completion),
+ llm_total_cached_tokens:
+ fragment("COALESCE(llm_total_cached_tokens, 0) + ?", ^cached),
+ llm_total_cost: fragment("COALESCE(llm_total_cost, 0) + ?", ^new_cost)
+ ]
+ ]
+ )
+
+ Repo.update_all(query, [])
+ {:ok, nil}
+ end
+ end
+
+ defp extract_val(map, string_key, atom_key, default) do
+ Map.get(map, string_key) || Map.get(map, atom_key) || default
+ end
+
+ defp get_valid_rate(rate, default_rate) do
+ if rate && Decimal.gt?(rate, 0) do
+ rate
+ else
+ Decimal.new(default_rate)
+ end
+ end
+
+ defp calculate_token_cost(prompt, completion, input_rate, output_rate) do
+ million = Decimal.new(1_000_000)
+ in_cost = Decimal.div(Decimal.mult(Decimal.new(prompt), input_rate), million)
+ out_cost = Decimal.div(Decimal.mult(Decimal.new(completion), output_rate), million)
+ Decimal.add(in_cost, out_cost)
+ end
end
diff --git a/lib/cadet/assessments/query.ex b/lib/cadet/assessments/query.ex
index 85cca7468..b5cd7639c 100644
--- a/lib/cadet/assessments/query.ex
+++ b/lib/cadet/assessments/query.ex
@@ -20,7 +20,8 @@ defmodule Cadet.Assessments.Query do
|> select([a, q], %Assessment{
a
| max_xp: q.max_xp,
- question_count: q.question_count
+ question_count: q.question_count,
+ has_llm_questions: q.has_llm_questions
})
end
@@ -49,7 +50,33 @@ defmodule Cadet.Assessments.Query do
|> select([q], %{
assessment_id: q.assessment_id,
max_xp: sum(q.max_xp),
- question_count: count(q.id)
+ question_count: count(q.id),
+ has_llm_questions:
+ fragment(
+ "bool_or(? ->> 'llm_prompt' IS NOT NULL AND ? ->> 'llm_prompt' != '')",
+ q.question,
+ q.question
+ )
})
end
+
+ @doc """
+ Checks if a course has any assessments with LLM content.
+ Returns true if any assessment has questions with llm_prompt or llm_assessment_prompt.
+ """
+ @spec course_has_llm_content?(integer()) :: boolean()
+ def course_has_llm_content?(course_id) when is_ecto_id(course_id) do
+ Assessment
+ |> where(course_id: ^course_id)
+ |> join(:left, [a], q in subquery(assessments_aggregates()), on: a.id == q.assessment_id)
+ |> select([a, q], %{
+ has_llm_questions: q.has_llm_questions,
+ llm_assessment_prompt: a.llm_assessment_prompt
+ })
+ |> Repo.all()
+ |> Enum.any?(fn assessment ->
+ assessment.has_llm_questions == true or
+ assessment.llm_assessment_prompt not in [nil, ""]
+ end)
+ end
end
diff --git a/lib/cadet/assessments/question_types/mcq_question.ex b/lib/cadet/assessments/question_types/mcq_question.ex
index b99a5031f..f6ede3a73 100644
--- a/lib/cadet/assessments/question_types/mcq_question.ex
+++ b/lib/cadet/assessments/question_types/mcq_question.ex
@@ -20,7 +20,7 @@ defmodule Cadet.Assessments.QuestionTypes.MCQQuestion do
|> cast(params, @required_fields)
|> cast_embed(:choices, with: &MCQChoice.changeset/2, required: true)
|> validate_one_correct_answer
- |> validate_required(@required_fields ++ ~w(choices)a)
+ |> validate_required(@required_fields)
end
defp validate_one_correct_answer(changeset) do
diff --git a/lib/cadet/courses/course.ex b/lib/cadet/courses/course.ex
index b8a113be1..8906a7ab1 100644
--- a/lib/cadet/courses/course.ex
+++ b/lib/cadet/courses/course.ex
@@ -27,7 +27,9 @@ defmodule Cadet.Courses.Course do
source_chapter: integer(),
source_variant: String.t(),
module_help_text: String.t(),
- assets_prefix: String.t() | nil
+ assets_prefix: String.t() | nil,
+ has_llm_content: boolean(),
+ assessment_configs: [String.t()]
}
schema "courses" do
@@ -54,6 +56,12 @@ defmodule Cadet.Courses.Course do
# for now, only settable from database
field(:assets_prefix, :string, default: nil)
+ # Virtual field computed at runtime based on assessments in course
+ field(:has_llm_content, :boolean, virtual: true, default: false)
+
+ # Virtual field populated at runtime by get_course_config/1
+ field(:assessment_configs, {:array, :string}, virtual: true, default: [])
+
has_many(:assessment_config, AssessmentConfig)
timestamps()
diff --git a/lib/cadet/courses/courses.ex b/lib/cadet/courses/courses.ex
index 965110673..da7703cfc 100644
--- a/lib/cadet/courses/courses.ex
+++ b/lib/cadet/courses/courses.ex
@@ -59,7 +59,7 @@ defmodule Cadet.Courses do
@doc """
Returns the course configuration for the specified course.
"""
- @spec get_course_config(integer) ::
+ @spec get_course_config(integer | binary) ::
{:ok, Course.t()} | {:error, {:bad_request, String.t()}}
def get_course_config(course_id) when is_ecto_id(course_id) do
Logger.info("Retrieving course configuration for course #{course_id}")
@@ -77,8 +77,12 @@ defmodule Cadet.Courses do
|> Enum.sort(&(&1.order < &2.order))
|> Enum.map(& &1.type)
+ has_llm_content = Assessments.Query.course_has_llm_content?(course_id)
+
Logger.info("Successfully retrieved course configuration for course #{course_id}")
- {:ok, Map.put_new(course, :assessment_configs, assessment_configs)}
+
+ {:ok,
+ %{course | assessment_configs: assessment_configs, has_llm_content: has_llm_content}}
end
end
diff --git a/lib/cadet/courses/sourcecast.ex b/lib/cadet/courses/sourcecast.ex
index 330663ab8..19683bb14 100644
--- a/lib/cadet/courses/sourcecast.ex
+++ b/lib/cadet/courses/sourcecast.ex
@@ -3,7 +3,7 @@ defmodule Cadet.Courses.Sourcecast do
Sourcecast stores audio files and deltas for playback
"""
use Cadet, :model
- use Arc.Ecto.Schema
+ use Waffle.Ecto.Schema
alias Cadet.Accounts.User
alias Cadet.Courses.{Course, SourcecastUpload}
diff --git a/lib/cadet/courses/sourcecast_upload.ex b/lib/cadet/courses/sourcecast_upload.ex
index 1d9a57c78..0b75130a7 100644
--- a/lib/cadet/courses/sourcecast_upload.ex
+++ b/lib/cadet/courses/sourcecast_upload.ex
@@ -2,8 +2,8 @@ defmodule Cadet.Courses.SourcecastUpload do
@moduledoc """
Represents an uploaded file for Sourcecast
"""
- use Arc.Definition
- use Arc.Ecto.Definition
+ use Waffle.Definition
+ use Waffle.Ecto.Definition
@extension_whitelist ~w(.wav)
@versions [:original]
diff --git a/lib/cadet/jobs/autograder/grading_job.ex b/lib/cadet/jobs/autograder/grading_job.ex
index e15f33480..4d15731ea 100644
--- a/lib/cadet/jobs/autograder/grading_job.ex
+++ b/lib/cadet/jobs/autograder/grading_job.ex
@@ -323,7 +323,7 @@ defmodule Cadet.Autograder.GradingJob do
is_grading_auto_published = assessment_config.is_grading_auto_published
is_manually_graded = assessment_config.is_manually_graded
- if Assessments.is_fully_autograded?(submission_id) and is_grading_auto_published and
+ if Assessments.fully_autograded?(submission_id) and is_grading_auto_published and
not is_manually_graded do
Assessments.publish_grading(submission_id)
end
diff --git a/lib/cadet/jobs/autograder/result_store_worker.ex b/lib/cadet/jobs/autograder/result_store_worker.ex
index 500004d50..0e4649cbf 100644
--- a/lib/cadet/jobs/autograder/result_store_worker.ex
+++ b/lib/cadet/jobs/autograder/result_store_worker.ex
@@ -96,7 +96,7 @@ defmodule Cadet.Autograder.ResultStoreWorker do
is_grading_auto_published = assessment_config.is_grading_auto_published
is_manually_graded = assessment_config.is_manually_graded
- if Assessments.is_fully_autograded?(submission_id) and is_grading_auto_published and
+ if Assessments.fully_autograded?(submission_id) and is_grading_auto_published and
not is_manually_graded do
Assessments.publish_grading(submission_id)
end
diff --git a/lib/cadet/jobs/xml_parser.ex b/lib/cadet/jobs/xml_parser.ex
index 206ed8c60..caf575bf7 100644
--- a/lib/cadet/jobs/xml_parser.ex
+++ b/lib/cadet/jobs/xml_parser.ex
@@ -205,7 +205,9 @@ defmodule Cadet.Updater.XMLParser do
template: ~x"./SNIPPET/TEMPLATE/text()" |> transform_by(&process_charlist/1),
postpend: ~x"./SNIPPET/POSTPEND/text()" |> transform_by(&process_charlist/1),
solution: ~x"./SNIPPET/SOLUTION/text()" |> transform_by(&process_charlist/1),
- llm_prompt: ~x"./LLM_GRADING_PROMPT/text()" |> transform_by(&process_charlist/1)
+ llm_prompt:
+ ~x"(./LLM_QUESTION_PROMPT/text() | ./LLM_GRADING_PROMPT/text())[1]"so
+ |> transform_by(&process_charlist/1)
),
entity
|> xmap(
diff --git a/lib/cadet/llm_stats.ex b/lib/cadet/llm_stats.ex
new file mode 100644
index 000000000..4fce20e3f
--- /dev/null
+++ b/lib/cadet/llm_stats.ex
@@ -0,0 +1,357 @@
+defmodule Cadet.LLMStats do
+ @moduledoc """
+ Context module for LLM usage statistics and feedback.
+ Provides per-assessment and per-question statistics and feedback management.
+ """
+
+ import Ecto.Query
+ alias Cadet.Repo
+ alias Cadet.Assessments.{Assessment, Question}
+ alias Cadet.LLMStats.{LLMUsageLog, LLMFeedback}
+
+ # =====================
+ # Usage Logging
+ # =====================
+
+ @doc """
+ Logs a usage event when "Generate Comments" is invoked.
+ """
+ def log_usage(attrs) do
+ %LLMUsageLog{}
+ |> LLMUsageLog.changeset(attrs)
+ |> Repo.insert()
+ end
+
+ # =====================
+ # Assessment-level Statistics
+ # =====================
+
+ @doc """
+ Returns LLM usage statistics for a specific assessment.
+
+ Returns:
+ - total_uses: total "Generate Comments" invocations
+ - unique_submissions: unique submissions that had LLM used
+ - unique_users: unique users who used the feature
+ - questions: per-question breakdown with stats
+ - llm_total_cost: Total cost in SGD
+ - llm_total_input_tokens: Total standard input tokens
+ - llm_total_output_tokens: Total output tokens
+ - llm_total_cached_tokens: Total cached input tokens
+ """
+ def get_assessment_statistics(course_id, assessment_id) do
+ base =
+ from(l in LLMUsageLog,
+ where: l.course_id == ^course_id and l.assessment_id == ^assessment_id
+ )
+
+ total_uses = Repo.aggregate(base, :count)
+
+ unique_submissions =
+ Repo.one(
+ from(l in base,
+ select: count(l.submission_id, :distinct)
+ )
+ )
+
+ unique_users =
+ Repo.one(
+ from(l in base,
+ select: count(l.user_id, :distinct)
+ )
+ )
+
+ questions =
+ Repo.all(
+ from(l in LLMUsageLog,
+ join: q in assoc(l, :question),
+ where: l.course_id == ^course_id and l.assessment_id == ^assessment_id,
+ group_by: [q.id, q.display_order],
+ select: %{
+ question_id: q.id,
+ display_order: q.display_order,
+ total_uses: count(l.id),
+ unique_submissions: count(l.submission_id, :distinct),
+ unique_users: count(l.user_id, :distinct)
+ },
+ order_by: [asc: q.display_order]
+ )
+ )
+
+ costs =
+ Repo.one(
+ from(a in Assessment,
+ where: a.id == ^assessment_id and a.course_id == ^course_id,
+ select: %{
+ llm_total_cost: a.llm_total_cost,
+ llm_total_input_tokens: a.llm_total_input_tokens,
+ llm_total_output_tokens: a.llm_total_output_tokens,
+ llm_total_cached_tokens: a.llm_total_cached_tokens
+ }
+ )
+ ) || %{}
+
+ %{
+ total_uses: total_uses,
+ unique_submissions: unique_submissions,
+ unique_users: unique_users,
+ questions: questions,
+ llm_total_cost: Map.get(costs, :llm_total_cost) || Decimal.new("0.0"),
+ llm_total_input_tokens: Map.get(costs, :llm_total_input_tokens) || 0,
+ llm_total_output_tokens: Map.get(costs, :llm_total_output_tokens) || 0,
+ llm_total_cached_tokens: Map.get(costs, :llm_total_cached_tokens) || 0
+ }
+ end
+
+ def get_course_statistics(course_id) do
+ assessments = fetch_llm_course_assessments(course_id)
+ assessments_with_stats = Enum.map(assessments, &build_course_assessment_stats(course_id, &1))
+
+ %{
+ course_total_input_tokens: sum_assessment_input_tokens(assessments_with_stats),
+ course_total_output_tokens: sum_assessment_output_tokens(assessments_with_stats),
+ course_total_cost: sum_assessment_costs(assessments_with_stats),
+ assessments: assessments_with_stats
+ }
+ end
+
+ defp fetch_llm_course_assessments(course_id) do
+ Repo.all(
+ from(a in Assessment,
+ where: a.course_id == ^course_id and a.is_published == true,
+ where:
+ fragment("? IS NOT NULL AND ? != ''", a.llm_assessment_prompt, a.llm_assessment_prompt) or
+ fragment(
+ "EXISTS (SELECT 1 FROM questions q WHERE q.assessment_id = ? AND q.question ->> 'llm_prompt' IS NOT NULL AND q.question ->> 'llm_prompt' != '')",
+ a.id
+ ),
+ join: c in assoc(a, :config),
+ select: %{
+ assessment_id: a.id,
+ title: a.title,
+ category: c.type,
+ llm_total_input_tokens: coalesce(a.llm_total_input_tokens, 0),
+ llm_total_output_tokens: coalesce(a.llm_total_output_tokens, 0),
+ llm_total_cost: coalesce(a.llm_total_cost, type(^Decimal.new("0.0"), :decimal))
+ }
+ )
+ )
+ end
+
+ defp build_course_assessment_stats(course_id, assessment) do
+ total_uses = get_assessment_total_uses(course_id, assessment.assessment_id)
+
+ %{
+ assessment_id: assessment.assessment_id,
+ title: assessment.title,
+ category: assessment.category,
+ total_uses: total_uses,
+ avg_rating: get_assessment_avg_rating(course_id, assessment.assessment_id),
+ llm_total_input_tokens: assessment.llm_total_input_tokens,
+ llm_total_output_tokens: assessment.llm_total_output_tokens,
+ llm_total_cost: assessment.llm_total_cost,
+ questions: get_question_stats(course_id, assessment, total_uses)
+ }
+ end
+
+ defp get_assessment_total_uses(course_id, assessment_id) do
+ Repo.one(
+ from(l in LLMUsageLog,
+ where: l.course_id == ^course_id and l.assessment_id == ^assessment_id,
+ select: count(l.id)
+ )
+ ) || 0
+ end
+
+ defp get_assessment_avg_rating(course_id, assessment_id) do
+ avg_rating =
+ Repo.one(
+ from(f in LLMFeedback,
+ where: f.course_id == ^course_id and f.assessment_id == ^assessment_id,
+ where: not is_nil(f.rating),
+ select: avg(f.rating)
+ )
+ )
+
+ normalize_avg_rating(avg_rating)
+ end
+
+ defp get_llm_questions(assessment_id) do
+ Repo.all(
+ from(q in Question,
+ where: q.assessment_id == ^assessment_id,
+ where:
+ fragment(
+ "? ->> 'llm_prompt' IS NOT NULL AND ? ->> 'llm_prompt' != ''",
+ q.question,
+ q.question
+ ),
+ order_by: [asc: q.display_order],
+ select: %{question_id: q.id, display_order: q.display_order}
+ )
+ )
+ end
+
+ defp get_question_stats(course_id, assessment, total_uses) do
+ llm_questions = get_llm_questions(assessment.assessment_id)
+ Enum.map(llm_questions, &build_question_stats(course_id, assessment, total_uses, &1))
+ end
+
+ defp build_question_stats(course_id, assessment, total_uses, question) do
+ question_uses =
+ get_question_total_uses(course_id, assessment.assessment_id, question.question_id)
+
+ %{
+ question_id: question.question_id,
+ display_order: question.display_order,
+ total_uses: question_uses,
+ avg_rating:
+ get_question_avg_rating(course_id, assessment.assessment_id, question.question_id),
+ llm_total_input_tokens:
+ proportional_token_count(assessment.llm_total_input_tokens, question_uses, total_uses),
+ llm_total_output_tokens:
+ proportional_token_count(assessment.llm_total_output_tokens, question_uses, total_uses),
+ llm_total_cost: proportional_cost(assessment.llm_total_cost, question_uses, total_uses)
+ }
+ end
+
+ defp get_question_total_uses(course_id, assessment_id, question_id) do
+ Repo.one(
+ from(l in LLMUsageLog,
+ where:
+ l.course_id == ^course_id and l.assessment_id == ^assessment_id and
+ l.question_id == ^question_id,
+ select: count(l.id)
+ )
+ ) || 0
+ end
+
+ defp get_question_avg_rating(course_id, assessment_id, question_id) do
+ avg_rating =
+ Repo.one(
+ from(f in LLMFeedback,
+ where:
+ f.course_id == ^course_id and f.assessment_id == ^assessment_id and
+ f.question_id == ^question_id,
+ where: not is_nil(f.rating),
+ select: avg(f.rating)
+ )
+ )
+
+ normalize_avg_rating(avg_rating)
+ end
+
+ defp normalize_avg_rating(nil), do: nil
+ defp normalize_avg_rating(avg_rating), do: Float.round(Decimal.to_float(avg_rating), 2)
+
+ defp proportional_token_count(_total_tokens, _question_uses, 0), do: 0
+
+ defp proportional_token_count(total_tokens, question_uses, total_uses) do
+ round(total_tokens * question_uses / total_uses)
+ end
+
+ defp proportional_cost(_total_cost, _question_uses, 0), do: Decimal.new("0.0")
+
+ defp proportional_cost(total_cost, question_uses, total_uses) do
+ cost_fraction = Decimal.div(Decimal.new(question_uses), Decimal.new(total_uses))
+ Decimal.round(Decimal.mult(total_cost, cost_fraction), 6, :half_up)
+ end
+
+ defp sum_assessment_input_tokens(assessments_with_stats) do
+ Enum.reduce(assessments_with_stats, 0, fn assessment, acc ->
+ acc + assessment.llm_total_input_tokens
+ end)
+ end
+
+ defp sum_assessment_output_tokens(assessments_with_stats) do
+ Enum.reduce(assessments_with_stats, 0, fn assessment, acc ->
+ acc + assessment.llm_total_output_tokens
+ end)
+ end
+
+ defp sum_assessment_costs(assessments_with_stats) do
+ Enum.reduce(assessments_with_stats, Decimal.new("0.0"), fn assessment, acc ->
+ Decimal.add(acc, assessment.llm_total_cost)
+ end)
+ end
+
+ # =====================
+ # Question-level Statistics
+ # =====================
+
+ @doc """
+ Returns LLM usage statistics for a specific question within an assessment.
+ """
+ def get_question_statistics(course_id, assessment_id, question_id) do
+ base =
+ from(l in LLMUsageLog,
+ where:
+ l.course_id == ^course_id and l.assessment_id == ^assessment_id and
+ l.question_id == ^question_id
+ )
+
+ total_uses = Repo.aggregate(base, :count)
+
+ unique_submissions =
+ Repo.one(
+ from(l in base,
+ select: count(l.submission_id, :distinct)
+ )
+ )
+
+ unique_users =
+ Repo.one(
+ from(l in base,
+ select: count(l.user_id, :distinct)
+ )
+ )
+
+ %{
+ total_uses: total_uses,
+ unique_submissions: unique_submissions,
+ unique_users: unique_users
+ }
+ end
+
+ # =====================
+ # Feedback
+ # =====================
+
+ @doc """
+ Submits user feedback for the LLM feature.
+ """
+ def submit_feedback(attrs) do
+ %LLMFeedback{}
+ |> LLMFeedback.changeset(attrs)
+ |> Repo.insert()
+ end
+
+ @doc """
+ Gets feedback for an assessment, optionally filtered by question_id.
+ """
+ def get_feedback(course_id, assessment_id, question_id \\ nil) do
+ query =
+ from(f in LLMFeedback,
+ join: u in assoc(f, :user),
+ where: f.course_id == ^course_id and f.assessment_id == ^assessment_id,
+ order_by: [desc: f.inserted_at],
+ select: %{
+ id: f.id,
+ rating: f.rating,
+ body: f.body,
+ user_name: u.name,
+ question_id: f.question_id,
+ inserted_at: f.inserted_at
+ }
+ )
+
+ query =
+ if question_id do
+ from(f in query, where: f.question_id == ^question_id)
+ else
+ query
+ end
+
+ Repo.all(query)
+ end
+end
diff --git a/lib/cadet/llm_stats/llm_feedback.ex b/lib/cadet/llm_stats/llm_feedback.ex
new file mode 100644
index 000000000..b940f8625
--- /dev/null
+++ b/lib/cadet/llm_stats/llm_feedback.ex
@@ -0,0 +1,34 @@
+defmodule Cadet.LLMStats.LLMFeedback do
+ @moduledoc """
+ Schema for user feedback on the LLM "Generate Comments" feature.
+ """
+
+ use Ecto.Schema
+ import Ecto.Changeset
+
+ schema "llm_feedback" do
+ belongs_to(:course, Cadet.Courses.Course)
+ belongs_to(:assessment, Cadet.Assessments.Assessment)
+ belongs_to(:question, Cadet.Assessments.Question)
+ belongs_to(:user, Cadet.Accounts.User)
+
+ field(:rating, :integer)
+ field(:body, :string)
+
+ timestamps()
+ end
+
+ @required_fields ~w(course_id user_id body)a
+ @optional_fields ~w(assessment_id question_id rating)a
+
+ def changeset(feedback, attrs) do
+ feedback
+ |> cast(attrs, @required_fields ++ @optional_fields)
+ |> validate_required(@required_fields)
+ |> validate_inclusion(:rating, 1..5)
+ |> foreign_key_constraint(:course_id)
+ |> foreign_key_constraint(:assessment_id)
+ |> foreign_key_constraint(:question_id)
+ |> foreign_key_constraint(:user_id)
+ end
+end
diff --git a/lib/cadet/llm_stats/llm_usage_log.ex b/lib/cadet/llm_stats/llm_usage_log.ex
new file mode 100644
index 000000000..3fecc8080
--- /dev/null
+++ b/lib/cadet/llm_stats/llm_usage_log.ex
@@ -0,0 +1,33 @@
+defmodule Cadet.LLMStats.LLMUsageLog do
+ @moduledoc """
+ Schema for logging each usage of the LLM "Generate Comments" feature.
+ """
+
+ use Ecto.Schema
+ import Ecto.Changeset
+
+ schema "llm_usage_logs" do
+ belongs_to(:course, Cadet.Courses.Course)
+ belongs_to(:assessment, Cadet.Assessments.Assessment)
+ belongs_to(:question, Cadet.Assessments.Question)
+ belongs_to(:answer, Cadet.Assessments.Answer)
+ belongs_to(:submission, Cadet.Assessments.Submission)
+ belongs_to(:user, Cadet.Accounts.User)
+
+ timestamps()
+ end
+
+ @required_fields ~w(course_id assessment_id question_id answer_id submission_id user_id)a
+
+ def changeset(log, attrs) do
+ log
+ |> cast(attrs, @required_fields)
+ |> validate_required(@required_fields)
+ |> foreign_key_constraint(:course_id)
+ |> foreign_key_constraint(:assessment_id)
+ |> foreign_key_constraint(:question_id)
+ |> foreign_key_constraint(:answer_id)
+ |> foreign_key_constraint(:submission_id)
+ |> foreign_key_constraint(:user_id)
+ end
+end
diff --git a/lib/cadet/workers/NotificationWorker.ex b/lib/cadet/workers/NotificationWorker.ex
index ad402a8fb..c240e1698 100644
--- a/lib/cadet/workers/NotificationWorker.ex
+++ b/lib/cadet/workers/NotificationWorker.ex
@@ -6,11 +6,11 @@ defmodule Cadet.Workers.NotificationWorker do
alias Cadet.{Email, Notifications, Mailer}
alias Cadet.Repo
- defp is_system_enabled(notification_type_id) do
+ defp system_enabled?(notification_type_id) do
Notifications.get_notification_type!(notification_type_id).is_enabled
end
- defp is_course_enabled(notification_type_id, course_id, assessment_config_id) do
+ defp course_enabled?(notification_type_id, course_id, assessment_config_id) do
notification_config =
Notifications.get_notification_config!(
notification_type_id,
@@ -25,7 +25,7 @@ defmodule Cadet.Workers.NotificationWorker do
end
end
- defp is_user_enabled(notification_type_id, course_reg_id) do
+ defp user_enabled?(notification_type_id, course_reg_id) do
pref = Notifications.get_notification_preference(notification_type_id, course_reg_id)
if is_nil(pref) do
@@ -37,7 +37,7 @@ defmodule Cadet.Workers.NotificationWorker do
# Returns true if user preference matches the job's time option.
# If user has made no preference, the default time option is used instead
- def is_user_time_option_matched(
+ def user_time_option_matched?(
notification_type_id,
assessment_config_id,
course_reg_id,
@@ -65,9 +65,9 @@ defmodule Cadet.Workers.NotificationWorker do
ntype = Cadet.Notifications.get_notification_type_by_name!("AVENGER BACKLOG")
notification_type_id = ntype.id
- if is_system_enabled(notification_type_id) do
+ if system_enabled?(notification_type_id) do
for course_id <- Cadet.Courses.get_all_course_ids() do
- if is_course_enabled(notification_type_id, course_id, nil) do
+ if course_enabled?(notification_type_id, course_id, nil) do
avengers_crs = Cadet.Accounts.CourseRegistrations.get_staffs(course_id)
for avenger_cr <- avengers_crs do
@@ -118,7 +118,7 @@ defmodule Cadet.Workers.NotificationWorker do
notification_type =
Cadet.Notifications.get_notification_type_by_name!("ASSESSMENT SUBMISSION")
- if is_system_enabled(notification_type.id) do
+ if system_enabled?(notification_type.id) do
submission = Cadet.Assessments.get_submission_by_id(submission_id)
course_id = submission.assessment.course_id
student_id = submission.student_id
@@ -129,10 +129,10 @@ defmodule Cadet.Workers.NotificationWorker do
avenger = avenger_cr.user
cond do
- !is_course_enabled(notification_type.id, course_id, assessment_config_id) ->
+ !course_enabled?(notification_type.id, course_id, assessment_config_id) ->
IO.puts("[ASSESSMENT_SUBMISSION] course-level disabled")
- !is_user_enabled(notification_type.id, avenger_cr.id) ->
+ !user_enabled?(notification_type.id, avenger_cr.id) ->
IO.puts("[ASSESSMENT_SUBMISSION] user-level disabled")
true ->
diff --git a/lib/cadet_web/admin_controllers/admin_assessments_controller.ex b/lib/cadet_web/admin_controllers/admin_assessments_controller.ex
index 862ad7444..b2223e8da 100644
--- a/lib/cadet_web/admin_controllers/admin_assessments_controller.ex
+++ b/lib/cadet_web/admin_controllers/admin_assessments_controller.ex
@@ -63,7 +63,7 @@ defmodule CadetWeb.AdminAssessmentsController do
end
def delete(conn, %{"course_id" => course_id, "assessmentid" => assessment_id}) do
- with {:same_course, true} <- {:same_course, is_same_course(course_id, assessment_id)},
+ with {:same_course, true} <- {:same_course, same_course?(course_id, assessment_id)},
{:ok, _} <- Assessments.delete_assessment(assessment_id) do
text(conn, "OK")
else
@@ -183,7 +183,7 @@ defmodule CadetWeb.AdminAssessmentsController do
end
end
- defp is_same_course(course_id, assessment_id) do
+ defp same_course?(course_id, assessment_id) do
Assessment
|> where(id: ^assessment_id)
|> where(course_id: ^course_id)
diff --git a/lib/cadet_web/admin_controllers/admin_llm_stats_controller.ex b/lib/cadet_web/admin_controllers/admin_llm_stats_controller.ex
new file mode 100644
index 000000000..fcf883b0b
--- /dev/null
+++ b/lib/cadet_web/admin_controllers/admin_llm_stats_controller.ex
@@ -0,0 +1,130 @@
+defmodule CadetWeb.AdminLLMStatsController do
+ @moduledoc """
+ Controller for per-assessment and per-question LLM usage statistics and feedback.
+ """
+
+ use CadetWeb, :controller
+ require Logger
+
+ alias Cadet.LLMStats
+
+  @doc """
+  GET /admin/llm-stats
+  Returns course-level LLM usage statistics with a per-assessment breakdown.
+  """
+ def course_stats(conn, %{"course_id" => course_id}) do
+ case parse_id(course_id) do
+ {:ok, course_id} ->
+ stats = LLMStats.get_course_statistics(course_id)
+ json(conn, stats)
+
+ :error ->
+ conn |> put_status(:bad_request) |> text("Invalid course_id")
+ end
+ end
+
+ def assessment_stats(conn, %{"course_id" => course_id, "assessment_id" => assessment_id}) do
+ with {:ok, course_id} <- parse_id(course_id),
+ {:ok, assessment_id} <- parse_id(assessment_id) do
+ stats = LLMStats.get_assessment_statistics(course_id, assessment_id)
+ json(conn, stats)
+ else
+ :error -> conn |> put_status(:bad_request) |> text("Invalid course_id or assessment_id")
+ end
+ end
+
+ @doc """
+ GET /admin/llm-stats/:assessment_id/:question_id
+ Returns question-level LLM usage statistics.
+ """
+ def question_stats(conn, %{
+ "course_id" => course_id,
+ "assessment_id" => assessment_id,
+ "question_id" => question_id
+ }) do
+ with {:ok, course_id} <- parse_id(course_id),
+ {:ok, assessment_id} <- parse_id(assessment_id),
+ {:ok, question_id} <- parse_id(question_id) do
+ stats = LLMStats.get_question_statistics(course_id, assessment_id, question_id)
+ json(conn, stats)
+ else
+ :error ->
+ conn
+ |> put_status(:bad_request)
+ |> text("Invalid course_id, assessment_id, or question_id")
+ end
+ end
+
+ @doc """
+ GET /admin/llm-stats/:assessment_id/feedback
+ Returns feedback for an assessment, optionally filtered by question_id query param.
+ """
+ def get_feedback(conn, params = %{"course_id" => course_id, "assessment_id" => assessment_id}) do
+ with {:ok, course_id} <- parse_id(course_id),
+ {:ok, assessment_id} <- parse_id(assessment_id),
+ {:ok, question_id} <- parse_optional_id(Map.get(params, "question_id")) do
+ feedback = LLMStats.get_feedback(course_id, assessment_id, question_id)
+ json(conn, feedback)
+ else
+ :error ->
+ conn
+ |> put_status(:bad_request)
+ |> text("Invalid course_id, assessment_id, or question_id")
+ end
+ end
+
+ @doc """
+ POST /admin/llm-stats/:assessment_id/feedback
+ Submits new feedback for the LLM feature on an assessment (optionally for a specific question).
+ """
+ def submit_feedback(
+ conn,
+ params = %{"course_id" => course_id, "assessment_id" => assessment_id}
+ ) do
+ with {:ok, course_id} <- parse_id(course_id),
+ {:ok, assessment_id} <- parse_id(assessment_id),
+ {:ok, question_id} <- parse_optional_id(Map.get(params, "question_id")) do
+ user = conn.assigns[:current_user]
+
+ attrs = %{
+ course_id: course_id,
+ user_id: user.id,
+ assessment_id: assessment_id,
+ question_id: question_id,
+ rating: Map.get(params, "rating"),
+ body: Map.get(params, "body")
+ }
+
+ case LLMStats.submit_feedback(attrs) do
+ {:ok, _feedback} ->
+ conn
+ |> put_status(:created)
+ |> json(%{message: "Feedback submitted successfully"})
+
+ {:error, changeset} ->
+ Logger.error("Failed to submit LLM feedback: #{inspect(changeset.errors)}")
+
+ conn
+ |> put_status(:bad_request)
+ |> json(%{error: "Failed to submit feedback"})
+ end
+ else
+ :error ->
+ conn
+ |> put_status(:bad_request)
+ |> text("Invalid course_id, assessment_id, or question_id")
+ end
+ end
+
+ defp parse_id(id) when is_integer(id), do: {:ok, id}
+
+ defp parse_id(id) when is_binary(id) do
+ case Integer.parse(id) do
+ {parsed, ""} -> {:ok, parsed}
+ _ -> :error
+ end
+ end
+
+ defp parse_optional_id(nil), do: {:ok, nil}
+ defp parse_optional_id(id), do: parse_id(id)
+end
diff --git a/lib/cadet_web/admin_views/admin_assessments_view.ex b/lib/cadet_web/admin_views/admin_assessments_view.ex
index 00bc81849..f1d81d15a 100644
--- a/lib/cadet_web/admin_views/admin_assessments_view.ex
+++ b/lib/cadet_web/admin_views/admin_assessments_view.ex
@@ -32,7 +32,8 @@ defmodule CadetWeb.AdminAssessmentsView do
maxTeamSize: :max_team_size,
hasVotingFeatures: :has_voting_features,
hasTokenCounter: :has_token_counter,
- isVotingPublished: :is_voting_published
+ isVotingPublished: :is_voting_published,
+ isLlmGraded: &(&1.has_llm_questions || &1.llm_assessment_prompt not in [nil, ""])
})
end
diff --git a/lib/cadet_web/admin_views/admin_grading_view.ex b/lib/cadet_web/admin_views/admin_grading_view.ex
index f6a90d17d..6414266a7 100644
--- a/lib/cadet_web/admin_views/admin_grading_view.ex
+++ b/lib/cadet_web/admin_views/admin_grading_view.ex
@@ -1,6 +1,7 @@
defmodule CadetWeb.AdminGradingView do
use CadetWeb, :view
+ alias Cadet.AIComments
import CadetWeb.AssessmentsHelpers
alias CadetWeb.AICodeAnalysisController
@@ -172,14 +173,36 @@ defmodule CadetWeb.AdminGradingView do
end
defp extract_ai_comments_per_answer(id, ai_comments) do
- matching_comment =
+ latest_comment =
ai_comments
- # Equivalent to fn comment -> comment.question_id == question_id end
- |> Enum.find(&(&1.answer_id == id))
+ |> Enum.filter(&(&1.answer_id == id))
+ |> case do
+ [] -> nil
+ comments -> Enum.max_by(comments, & &1.inserted_at, NaiveDateTime)
+ end
- case matching_comment do
- nil -> nil
- comment -> %{response: comment.response, insertedAt: comment.inserted_at}
+ case latest_comment do
+ nil ->
+ nil
+
+ comment ->
+ selected_indices = comment.selected_indices || []
+
+ selected_edits =
+ selected_indices
+ |> Enum.reduce(%{}, fn index, acc ->
+ case AIComments.get_latest_version(comment.id, index) do
+ nil -> acc
+ version -> Map.put(acc, index, version.content)
+ end
+ end)
+
+ %{
+ response: comment.response,
+ insertedAt: comment.inserted_at,
+ selectedIndices: selected_indices,
+ selectedEdits: selected_edits
+ }
end
end
@@ -221,7 +244,13 @@ defmodule CadetWeb.AdminGradingView do
end
defp build_prompts(answer, course, assessment) do
- if course.enable_llm_grading do
+ question_prompt =
+ Map.get(answer.question.question, "llm_prompt") ||
+ Map.get(answer.question.question, :llm_prompt)
+
+ if course.enable_llm_grading &&
+ present_prompt?(assessment.llm_assessment_prompt) &&
+ present_prompt?(question_prompt) do
AICodeAnalysisController.create_final_messages(
course.llm_course_level_prompt,
assessment.llm_assessment_prompt,
@@ -232,6 +261,9 @@ defmodule CadetWeb.AdminGradingView do
end
end
+ defp present_prompt?(value) when is_binary(value), do: String.trim(value) != ""
+ defp present_prompt?(_), do: false
+
defp build_grade(answer = %{grader: grader}) do
transform_map_for_view(answer, %{
grader: grader_builder(grader),
diff --git a/lib/cadet_web/controllers/answer_controller.ex b/lib/cadet_web/controllers/answer_controller.ex
index c4c99f03f..7c7a85354 100644
--- a/lib/cadet_web/controllers/answer_controller.ex
+++ b/lib/cadet_web/controllers/answer_controller.ex
@@ -17,7 +17,7 @@ defmodule CadetWeb.AnswerController do
with {:question, question} when not is_nil(question) <-
{:question, Assessments.get_question(question_id)},
{:is_open?, true} <-
- {:is_open?, can_bypass? or Assessments.is_open?(question.assessment)},
+ {:is_open?, can_bypass? or Assessments.open?(question.assessment)},
{:ok, _nil} <- Assessments.answer_question(question, course_reg, answer, can_bypass?) do
text(conn, "OK")
else
@@ -53,7 +53,7 @@ defmodule CadetWeb.AnswerController do
with {:question, question} when not is_nil(question) <-
{:question, Assessments.get_question(question_id)},
{:is_open?, true} <-
- {:is_open?, can_bypass? or Assessments.is_open?(question.assessment)},
+ {:is_open?, can_bypass? or Assessments.open?(question.assessment)},
{:ok, last_modified} <-
Assessments.has_last_modified_answer?(
question,
diff --git a/lib/cadet_web/controllers/assessments_controller.ex b/lib/cadet_web/controllers/assessments_controller.ex
index 76e746dfe..b5f613814 100644
--- a/lib/cadet_web/controllers/assessments_controller.ex
+++ b/lib/cadet_web/controllers/assessments_controller.ex
@@ -22,7 +22,7 @@ defmodule CadetWeb.AssessmentsController do
{:submission, Assessments.get_submission(assessment_id, cr)},
{:is_open?, true} <-
{:is_open?,
- cr.role in @bypass_closed_roles or Assessments.is_open?(submission.assessment)},
+ cr.role in @bypass_closed_roles or Assessments.open?(submission.assessment)},
{:ok, _nil} <- Assessments.finalise_submission(submission) do
Logger.info("Successfully submitted assessment #{assessment_id} for user #{cr.id}.")
diff --git a/lib/cadet_web/controllers/generate_ai_comments.ex b/lib/cadet_web/controllers/generate_ai_comments.ex
index 24122dd4d..ce8d37798 100644
--- a/lib/cadet_web/controllers/generate_ai_comments.ex
+++ b/lib/cadet_web/controllers/generate_ai_comments.ex
@@ -4,7 +4,7 @@ defmodule CadetWeb.AICodeAnalysisController do
require HTTPoison
require Logger
- alias Cadet.{Assessments, AIComments, Courses}
+ alias Cadet.{Assessments, AIComments, Courses, LLMStats}
alias CadetWeb.{AICodeAnalysisController, AICommentsHelpers}
# For logging outputs to both database and file
@@ -32,10 +32,7 @@ defmodule CadetWeb.AICodeAnalysisController do
end
existing_comment ->
- # Convert the existing comment struct to a map before merging
- updated_attrs = Map.merge(Map.from_struct(existing_comment), attrs)
-
- case AIComments.update_ai_comment(existing_comment.id, updated_attrs) do
+ case AIComments.update_ai_comment(existing_comment.id, attrs) do
{:error, :not_found} ->
Logger.error("AI comment to update not found in database")
{:error, :not_found}
@@ -82,8 +79,9 @@ defmodule CadetWeb.AICodeAnalysisController do
"course_id" => course_id
})
when is_ecto_id(answer_id) do
- with {answer_id_parsed, ""} <- Integer.parse(answer_id),
- {:ok, course} <- Courses.get_course_config(course_id),
+ with {:ok, answer_id_parsed} <- parse_answer_id(answer_id),
+ {:ok, course_id_parsed} <- parse_course_id(course_id),
+ {:ok, course} <- Courses.get_course_config(course_id_parsed),
{:ok} <- ensure_llm_enabled(course),
{:ok, key} <- AICommentsHelpers.decrypt_llm_api_key(course.llm_api_key),
{:ok} <-
@@ -104,16 +102,22 @@ defmodule CadetWeb.AICodeAnalysisController do
llm_model: course.llm_model,
llm_api_url: course.llm_api_url,
course_prompt: course.llm_course_level_prompt,
- assessment_prompt: Assessments.get_llm_assessment_prompt(answer.question_id)
+ assessment_prompt: Assessments.get_llm_assessment_prompt(answer.question_id),
+ course_id: course_id_parsed
}
)
else
- :error ->
+ {:error, :invalid_answer_id} ->
conn
|> put_status(:bad_request)
|> text("Invalid question ID format")
- {:decrypt_error, err} ->
+ {:error, :invalid_course_id} ->
+ conn
+ |> put_status(:bad_request)
+ |> text("Invalid course ID format")
+
+ {:decrypt_error, _err} ->
conn
|> put_status(:internal_server_error)
|> text("Failed to decrypt LLM API key")
@@ -201,7 +205,8 @@ defmodule CadetWeb.AICodeAnalysisController do
llm_model: llm_model,
llm_api_url: llm_api_url,
course_prompt: course_prompt,
- assessment_prompt: assessment_prompt
+ assessment_prompt: assessment_prompt,
+ course_id: course_id
}
) do
# Combine prompts if llm_prompt exists
@@ -228,35 +233,72 @@ defmodule CadetWeb.AICodeAnalysisController do
recv_timeout: 60_000
]
}) do
- {:ok, %{choices: [%{"message" => %{"content" => content}} | _]}} ->
- save_comment(
- answer.id,
- Enum.at(final_messages, 0).content,
- Enum.at(final_messages, 1).content,
- content
- )
-
- comments_list = String.split(content, "|||")
-
- filtered_comments =
- Enum.filter(comments_list, fn comment ->
- String.trim(comment) != ""
- end)
-
- json(conn, %{"comments" => filtered_comments})
-
- {:ok, other} ->
- save_comment(
- answer.id,
- Enum.at(final_messages, 0).content,
- Enum.at(final_messages, 1).content,
- Jason.encode!(other),
- "Unexpected JSON shape"
- )
-
- conn
- |> put_status(:bad_gateway)
- |> text("Unexpected response format from LLM")
+ {:ok, response} ->
+ # Handle cases where API may or may not return usage field
+ case response do
+ %{choices: [%{"message" => %{"content" => content}} | _]} ->
+ save_comment(
+ answer.id,
+ Enum.at(final_messages, 0).content,
+ Enum.at(final_messages, 1).content,
+ content
+ )
+
+ # Optionally update cost tracking if usage data is available
+ case Map.get(response, :usage) do
+ nil ->
+ Logger.warning("LLM API response missing usage field for answer_id=#{answer.id}")
+
+ usage ->
+ # get the tokens consumed and calc cost
+ Cadet.Assessments.update_llm_usage_and_cost(
+ answer.question.assessment_id,
+ usage
+ )
+ end
+
+ usage_attrs = %{
+ course_id: course_id,
+ assessment_id: answer.question.assessment_id,
+ question_id: answer.question_id,
+ answer_id: answer.id,
+ submission_id: answer.submission_id,
+ user_id: conn.assigns.course_reg.user_id
+ }
+
+ # Log LLM usage for statistics (non-blocking for response generation)
+ case LLMStats.log_usage(usage_attrs) do
+ {:ok, _usage_log} ->
+ :ok
+
+ {:error, changeset} ->
+ Logger.error(
+ "Failed to log LLM usage to database: #{inspect(changeset.errors)} attrs=#{inspect(usage_attrs)}"
+ )
+ end
+
+ comments_list = String.split(content, "|||")
+
+ filtered_comments =
+ Enum.filter(comments_list, fn comment ->
+ String.trim(comment) != ""
+ end)
+
+ json(conn, %{"comments" => filtered_comments})
+
+ _ ->
+ save_comment(
+ answer.id,
+ Enum.at(final_messages, 0).content,
+ Enum.at(final_messages, 1).content,
+ Jason.encode!(response),
+ "Unexpected JSON shape"
+ )
+
+ conn
+ |> put_status(:bad_gateway)
+ |> text("Unexpected response format from LLM")
+ end
{:error, reason} ->
save_comment(
@@ -274,23 +316,115 @@ defmodule CadetWeb.AICodeAnalysisController do
end
@doc """
- Saves the final comment chosen for a submission.
+ Saves the chosen comment indices and optional edits for each selected comment.
+ Expects: selected_indices (list of ints), edits (optional map of index => edited_text).
"""
- def save_final_comment(conn, %{
- "answer_id" => answer_id,
- "comment" => comment
- }) do
- case AIComments.update_final_comment(answer_id, comment) do
- {:ok, _updated_comment} ->
+ def save_chosen_comments(
+ conn,
+ params = %{
+ "answer_id" => answer_id,
+ "selected_indices" => selected_indices
+ }
+ )
+ when is_ecto_id(answer_id) do
+ editor_id = conn.assigns.course_reg.user_id
+ edits = Map.get(params, "edits", %{})
+
+ with {:ok, answer_id_parsed} <- parse_answer_id(answer_id),
+ ai_comment when not is_nil(ai_comment) <-
+ AIComments.get_latest_ai_comment(answer_id_parsed),
+ {:ok, parsed_edits} <- parse_edits(edits),
+ {:ok, _updated} <-
+ AIComments.save_selected_comments(answer_id_parsed, selected_indices, editor_id) do
+ # Create version entries for each edit
+ version_results =
+ Enum.map(parsed_edits, fn {index, edited_text} ->
+ AIComments.create_comment_version(
+ ai_comment.id,
+ index,
+ edited_text,
+ editor_id
+ )
+ end)
+
+ errors = Enum.filter(version_results, &match?({:error, _}, &1))
+
+ if errors == [] do
json(conn, %{"status" => "success"})
+ else
+ conn
+ |> put_status(:unprocessable_entity)
+ |> text("Failed to save some comment versions")
+ end
+ else
+ {:error, :invalid_answer_id} ->
+ conn
+ |> put_status(:bad_request)
+ |> text("Invalid answer ID format")
- {:error, changeset} ->
+ nil ->
+ conn
+ |> put_status(:not_found)
+ |> text("AI comment not found for this answer")
+
+ {:error, :invalid_edits} ->
+ conn
+ |> put_status(:unprocessable_entity)
+ |> text("Invalid edits payload")
+
+ {:error, _} ->
conn
|> put_status(:unprocessable_entity)
- |> text("Failed to save final comment")
+ |> text("Failed to save chosen comments")
+ end
+ end
+
+ defp parse_answer_id(answer_id) when is_integer(answer_id), do: {:ok, answer_id}
+
+ defp parse_answer_id(answer_id) when is_binary(answer_id) do
+ case Integer.parse(answer_id) do
+ {parsed, ""} -> {:ok, parsed}
+ _ -> {:error, :invalid_answer_id}
+ end
+ end
+
+ defp parse_course_id(course_id) when is_integer(course_id), do: {:ok, course_id}
+
+ defp parse_course_id(course_id) when is_binary(course_id) do
+ case Integer.parse(course_id) do
+ {parsed, ""} -> {:ok, parsed}
+ _ -> {:error, :invalid_course_id}
end
end
+ defp parse_edits(edits) when is_map(edits) do
+ edits
+ |> Enum.reduce_while({:ok, []}, fn {index_str, edited_text}, {:ok, acc} ->
+ case {parse_edit_index(index_str), edited_text} do
+ {{:ok, index}, edited_text} when is_binary(edited_text) ->
+ {:cont, {:ok, [{index, edited_text} | acc]}}
+
+ _ ->
+ {:halt, {:error, :invalid_edits}}
+ end
+ end)
+ |> case do
+ {:ok, parsed_edits} -> {:ok, Enum.reverse(parsed_edits)}
+ {:error, :invalid_edits} -> {:error, :invalid_edits}
+ end
+ end
+
+ defp parse_edits(_), do: {:error, :invalid_edits}
+
+ defp parse_edit_index(index_str) when is_binary(index_str) do
+ case Integer.parse(index_str) do
+ {index, ""} -> {:ok, index}
+ _ -> {:error, :invalid_edits}
+ end
+ end
+
+ defp parse_edit_index(_), do: {:error, :invalid_edits}
+
swagger_path :generate_ai_comments do
post("/courses/{course_id}/admin/generate-comments/{answer_id}")
@@ -313,10 +447,10 @@ defmodule CadetWeb.AICodeAnalysisController do
response(403, "LLM grading is not enabled for this course")
end
- swagger_path :save_final_comment do
- post("/courses/{course_id}/admin/save-final-comment/{answer_id}")
+ swagger_path :save_chosen_comments do
+ post("/courses/{course_id}/admin/save-chosen-comments/{answer_id}")
- summary("Save the final comment chosen for a submission.")
+ summary("Save chosen comment indices and optional edits for a submission.")
security([%{JWT: []}])
@@ -326,13 +460,13 @@ defmodule CadetWeb.AICodeAnalysisController do
parameters do
course_id(:path, :integer, "course id", required: true)
answer_id(:path, :integer, "answer id", required: true)
- comment(:body, :string, "The final comment to save", required: true)
+
+ body(:body, Schema.ref(:SaveChosenCommentsBody), "Chosen comments payload", required: true)
end
- response(200, "OK", Schema.ref(:SaveFinalComment))
- response(400, "Invalid or missing parameter(s)")
- response(401, "Unauthorized")
- response(403, "Forbidden")
+ response(200, "OK", Schema.ref(:SaveChosenComments))
+ response(404, "AI comment not found")
+ response(422, "Failed to save")
end
def swagger_definitions do
@@ -343,7 +477,15 @@ defmodule CadetWeb.AICodeAnalysisController do
comments(:string, "AI-generated comments on the submission answers")
end
end,
- SaveFinalComment:
+ SaveChosenCommentsBody:
+ swagger_schema do
+ properties do
+ selected_indices(Schema.array(:integer), "Indices of chosen comments", required: true)
+
+ edits(:object, "Map of comment index to edited text")
+ end
+ end,
+ SaveChosenComments:
swagger_schema do
properties do
status(:string, "Status of the operation")
diff --git a/lib/cadet_web/endpoint.ex b/lib/cadet_web/endpoint.ex
index 57f801b83..be96645f8 100644
--- a/lib/cadet_web/endpoint.ex
+++ b/lib/cadet_web/endpoint.ex
@@ -56,19 +56,4 @@ defmodule CadetWeb.Endpoint do
)
plug(CadetWeb.Router)
-
- @doc """
- Callback invoked for dynamically configuring the endpoint.
-
- It receives the endpoint configuration and checks if
- configuration should be loaded from the system environment.
- """
- def init(_key, config) do
- if config[:load_from_system_env] do
- port = System.get_env("PORT") || raise "expected the PORT environment variable to be set"
- {:ok, Keyword.put(config, :http, [:inet6, port: port] ++ (config[:http] || []))}
- else
- {:ok, config}
- end
- end
end
diff --git a/lib/cadet_web/helpers/ai_comments_helpers.ex b/lib/cadet_web/helpers/ai_comments_helpers.ex
index 4e2df33c9..965e501b2 100644
--- a/lib/cadet_web/helpers/ai_comments_helpers.ex
+++ b/lib/cadet_web/helpers/ai_comments_helpers.ex
@@ -4,7 +4,7 @@ defmodule CadetWeb.AICommentsHelpers do
"""
require Logger
- def decrypt_llm_api_key(nil), do: nil
+ def decrypt_llm_api_key(nil), do: {:decrypt_error, :no_api_key_configured}
def decrypt_llm_api_key(encrypted_key) do
case Application.get_env(:openai, :encryption_key) do
diff --git a/lib/cadet_web/router.ex b/lib/cadet_web/router.ex
index e045dc637..3f156adf5 100644
--- a/lib/cadet_web/router.ex
+++ b/lib/cadet_web/router.ex
@@ -233,17 +233,18 @@ defmodule CadetWeb.Router do
)
post(
- "/save-final-comment/:answer_id",
- AICodeAnalysisController,
- :save_final_comment
- )
-
- post(
- "/save-chosen-comments/:submissionid/:questionid",
+ "/save-chosen-comments/:answer_id",
AICodeAnalysisController,
:save_chosen_comments
)
+ # LLM Statistics & Feedback (per-assessment)
+ get("/llm-stats", AdminLLMStatsController, :course_stats)
+ get("/llm-stats/:assessment_id", AdminLLMStatsController, :assessment_stats)
+ get("/llm-stats/:assessment_id/feedback", AdminLLMStatsController, :get_feedback)
+ post("/llm-stats/:assessment_id/feedback", AdminLLMStatsController, :submit_feedback)
+ get("/llm-stats/:assessment_id/:question_id", AdminLLMStatsController, :question_stats)
+
get("/users", AdminUserController, :index)
get("/users/teamformation", AdminUserController, :get_students)
put("/users", AdminUserController, :upsert_users_and_groups)
@@ -319,8 +320,21 @@ defmodule CadetWeb.Router do
defp assign_course(conn, _opts) do
course_id = conn.path_params["course_id"]
+ parsed_course_id =
+ case Integer.parse(to_string(course_id)) do
+ {id, ""} -> id
+ _ -> nil
+ end
+
course_reg =
- Cadet.Accounts.CourseRegistrations.get_user_record(conn.assigns.current_user.id, course_id)
+ if is_nil(parsed_course_id) do
+ nil
+ else
+ Cadet.Accounts.CourseRegistrations.get_user_record(
+ conn.assigns.current_user.id,
+ parsed_course_id
+ )
+ end
case course_reg do
nil -> conn |> send_resp(403, "Forbidden") |> halt()
diff --git a/lib/cadet_web/views/assessments_view.ex b/lib/cadet_web/views/assessments_view.ex
index 3859b8615..547359215 100644
--- a/lib/cadet_web/views/assessments_view.ex
+++ b/lib/cadet_web/views/assessments_view.ex
@@ -9,7 +9,7 @@ defmodule CadetWeb.AssessmentsView do
end
def render("overview.json", %{assessment: assessment}) do
- transform_map_for_view(assessment, %{
+ base_map = %{
id: :id,
courseId: :course_id,
title: :title,
@@ -35,8 +35,29 @@ defmodule CadetWeb.AssessmentsView do
hasVotingFeatures: :has_voting_features,
hasTokenCounter: :has_token_counter,
isVotingPublished: :is_voting_published,
- hoursBeforeEarlyXpDecay: & &1.config.hours_before_early_xp_decay
- })
+ hoursBeforeEarlyXpDecay: & &1.config.hours_before_early_xp_decay,
+ isLlmGraded: &(&1.has_llm_questions || &1.llm_assessment_prompt not in [nil, ""])
+ }
+
+ # Only include LLM cost fields if the assessment is LLM graded
+ is_llm_graded =
+ assessment.has_llm_questions || assessment.llm_assessment_prompt not in [nil, ""]
+
+ final_map =
+ if is_llm_graded do
+ Map.merge(base_map, %{
+ llmInputCost: :llm_input_cost,
+ llmOutputCost: :llm_output_cost,
+ llmTotalInputTokens: :llm_total_input_tokens,
+ llmTotalOutputTokens: :llm_total_output_tokens,
+ llmTotalCachedTokens: :llm_total_cached_tokens,
+ llmTotalCost: :llm_total_cost
+ })
+ else
+ base_map
+ end
+
+ transform_map_for_view(assessment, final_map)
end
def render("show.json", %{assessment: assessment}) do
diff --git a/lib/cadet_web/views/courses_view.ex b/lib/cadet_web/views/courses_view.ex
index a3a3f443e..75953336b 100644
--- a/lib/cadet_web/views/courses_view.ex
+++ b/lib/cadet_web/views/courses_view.ex
@@ -17,6 +17,7 @@ defmodule CadetWeb.CoursesView do
enableSourcecast: :enable_sourcecast,
enableStories: :enable_stories,
enableLlmGrading: :enable_llm_grading,
+ hasLlmContent: :has_llm_content,
llmModel: :llm_model,
llmApiUrl: :llm_api_url,
llmCourseLevelPrompt: :llm_course_level_prompt,
diff --git a/lib/cadet_web/views/user_view.ex b/lib/cadet_web/views/user_view.ex
index 4a497465b..ed7f5f3be 100644
--- a/lib/cadet_web/views/user_view.ex
+++ b/lib/cadet_web/views/user_view.ex
@@ -1,6 +1,7 @@
defmodule CadetWeb.UserView do
use CadetWeb, :view
+ alias Cadet.Assessments.Query
alias Cadet.Courses
def render("index.json", %{
@@ -97,7 +98,9 @@ defmodule CadetWeb.UserView do
nil
_ ->
- transform_map_for_view(latest.course, %{
+ latest.course
+ |> Map.put(:has_llm_content, Query.course_has_llm_content?(latest.course.id))
+ |> transform_map_for_view(%{
courseName: :course_name,
courseShortName: :course_short_name,
viewable: :viewable,
@@ -109,6 +112,8 @@ defmodule CadetWeb.UserView do
topContestLeaderboardDisplay: :top_contest_leaderboard_display,
enableSourcecast: :enable_sourcecast,
enableStories: :enable_stories,
+ enableLlmGrading: :enable_llm_grading,
+ hasLlmContent: :has_llm_content,
sourceChapter: :source_chapter,
sourceVariant: :source_variant,
moduleHelpText: :module_help_text,
diff --git a/mix.exs b/mix.exs
index 79dcf4c4a..038a72637 100644
--- a/mix.exs
+++ b/mix.exs
@@ -57,14 +57,14 @@ defmodule Cadet.Mixfile do
# Type `mix help deps` for examples and options.
defp deps do
[
- {:arc, "~> 0.11"},
- {:arc_ecto, "~> 0.11"},
+ {:waffle, "~> 1.1"},
+ {:waffle_ecto, "~> 0.0.12"},
{:corsica, "~> 2.1"},
{:csv, "~> 3.2"},
{:ecto_enum, "~> 1.0"},
{:ex_aws, "~> 2.1", override: true},
{:ex_aws_lambda, "~> 2.0"},
- {:ex_aws_s3, "~> 2.0"},
+ {:ex_aws_s3, "2.4.0"},
{:ex_aws_secretsmanager, "~> 2.0"},
{:ex_aws_sts, "~> 2.1"},
{:ex_json_schema, "~> 0.11.0"},
diff --git a/mix.lock b/mix.lock
index 8c286b55f..813c953d2 100644
--- a/mix.lock
+++ b/mix.lock
@@ -1,6 +1,4 @@
%{
- "arc": {:hex, :arc, "0.11.0", "ac7a0cc03035317b6fef9fe94c97d7d9bd183a3e7ce1606aa0c175cfa8d1ba6d", [:mix], [{:ex_aws, "~> 2.0", [hex: :ex_aws, repo: "hexpm", optional: true]}, {:ex_aws_s3, "~> 2.0", [hex: :ex_aws_s3, repo: "hexpm", optional: true]}, {:hackney, "~> 1.0", [hex: :hackney, repo: "hexpm", optional: false]}, {:poison, "~> 2.2 or ~> 3.1", [hex: :poison, repo: "hexpm", optional: true]}, {:sweet_xml, "~> 0.6", [hex: :sweet_xml, repo: "hexpm", optional: true]}], "hexpm", "e91a8bd676fca716f6e46275ae81fb96c0bbc7a9d5b96cac511ae190588eddd0"},
- "arc_ecto": {:hex, :arc_ecto, "0.11.3", "52f278330fe3a29472ce5d9682514ca09eaed4b33453cbaedb5241a491464f7d", [:mix], [{:arc, "~> 0.11.0", [hex: :arc, repo: "hexpm", optional: false]}, {:ecto, ">= 2.1.0", [hex: :ecto, repo: "hexpm", optional: false]}], "hexpm", "24beed35003707434a778caece7d71e46e911d46d1e82e7787345264fc8e96d0"},
"artificery": {:hex, :artificery, "0.4.3", "0bc4260f988dcb9dda4b23f9fc3c6c8b99a6220a331534fdf5bf2fd0d4333b02", [:mix], [], "hexpm", "12e95333a30e20884e937abdbefa3e7f5e05609c2ba8cf37b33f000b9ffc0504"},
"bamboo": {:hex, :bamboo, "2.5.0", "973f5cb1471a1d2d7d9da7b8e4f6096afb6a133f85394631184fd40be8adb8ab", [:mix], [{:hackney, ">= 1.15.2", [hex: :hackney, repo: "hexpm", optional: false]}, {:jason, "~> 1.0", [hex: :jason, repo: "hexpm", optional: true]}, {:mime, "~> 1.4 or ~> 2.0", [hex: :mime, repo: "hexpm", optional: false]}, {:plug, "~> 1.0", [hex: :plug, repo: "hexpm", optional: false]}], "hexpm", "35c8635ff6677a81ab7258944ff15739280f3254a041b6f0229dddeb9b90ad3d"},
"bamboo_phoenix": {:hex, :bamboo_phoenix, "1.0.0", "f3cc591ffb163ed0bf935d256f1f4645cd870cf436545601215745fb9cc9953f", [:mix], [{:bamboo, ">= 2.0.0", [hex: :bamboo, repo: "hexpm", optional: false]}, {:phoenix, ">= 1.3.0", [hex: :phoenix, repo: "hexpm", optional: false]}], "hexpm", "6db88fbb26019c84a47994bb2bd879c0887c29ce6c559bc6385fd54eb8b37dee"},
@@ -101,6 +99,8 @@
"timex": {:hex, :timex, "3.7.13", "0688ce11950f5b65e154e42b47bf67b15d3bc0e0c3def62199991b8a8079a1e2", [:mix], [{:combine, "~> 0.10", [hex: :combine, repo: "hexpm", optional: false]}, {:gettext, "~> 0.26", [hex: :gettext, repo: "hexpm", optional: false]}, {:tzdata, "~> 1.1", [hex: :tzdata, repo: "hexpm", optional: false]}], "hexpm", "09588e0522669328e973b8b4fd8741246321b3f0d32735b589f78b136e6d4c54"},
"tzdata": {:hex, :tzdata, "1.1.3", "b1cef7bb6de1de90d4ddc25d33892b32830f907e7fc2fccd1e7e22778ab7dfbc", [:mix], [{:hackney, "~> 1.17", [hex: :hackney, repo: "hexpm", optional: false]}], "hexpm", "d4ca85575a064d29d4e94253ee95912edfb165938743dbf002acdf0dcecb0c28"},
"unicode_util_compat": {:hex, :unicode_util_compat, "0.7.1", "a48703a25c170eedadca83b11e88985af08d35f37c6f664d6dcfb106a97782fc", [:rebar3], [], "hexpm", "b3a917854ce3ae233619744ad1e0102e05673136776fb2fa76234f3e03b23642"},
+ "waffle": {:hex, :waffle, "1.1.10", "0f847ed6f95349af258a90f0f70ffea02b3d3729c4eb78f6fae7bf776e91779e", [:mix], [{:ex_aws, "~> 2.1", [hex: :ex_aws, repo: "hexpm", optional: true]}, {:ex_aws_s3, "~> 2.1", [hex: :ex_aws_s3, repo: "hexpm", optional: true]}, {:hackney, "~> 1.9", [hex: :hackney, repo: "hexpm", optional: false]}, {:sweet_xml, "~> 0.6", [hex: :sweet_xml, repo: "hexpm", optional: true]}], "hexpm", "859ba6377b78f0a51bc9596227b194f26241efbbd408bd217450c22b0f359cc4"},
+ "waffle_ecto": {:hex, :waffle_ecto, "0.0.12", "e5c17c49b071b903df71861c642093281123142dc4e9908c930db3e06795b040", [:mix], [{:ecto, "~> 3.0", [hex: :ecto, repo: "hexpm", optional: false]}, {:waffle, "~> 1.0", [hex: :waffle, repo: "hexpm", optional: false]}], "hexpm", "585fe6371057066d2e8e3383ddd7a2437ff0668caf3f4cbf5a041e0de9837168"},
"websock": {:hex, :websock, "0.5.3", "2f69a6ebe810328555b6fe5c831a851f485e303a7c8ce6c5f675abeb20ebdadc", [:mix], [], "hexpm", "6105453d7fac22c712ad66fab1d45abdf049868f253cf719b625151460b8b453"},
"websock_adapter": {:hex, :websock_adapter, "0.5.9", "43dc3ba6d89ef5dec5b1d0a39698436a1e856d000d84bf31a3149862b01a287f", [:mix], [{:bandit, ">= 0.6.0", [hex: :bandit, repo: "hexpm", optional: true]}, {:plug, "~> 1.14", [hex: :plug, repo: "hexpm", optional: false]}, {:plug_cowboy, "~> 2.6", [hex: :plug_cowboy, repo: "hexpm", optional: true]}, {:websock, "~> 0.5", [hex: :websock, repo: "hexpm", optional: false]}], "hexpm", "5534d5c9adad3c18a0f58a9371220d75a803bf0b9a3d87e6fe072faaeed76a08"},
"xml_builder": {:hex, :xml_builder, "2.4.0", "b20d23077266c81f593360dc037ea398461dddb6638a329743da6c73afa56725", [:mix], [], "hexpm", "833e325bb997f032b5a1b740d2fd6feed3c18ca74627f9f5f30513a9ae1a232d"},
diff --git a/priv/repo/migrations/20260225120000_create_ai_comment_versions.exs b/priv/repo/migrations/20260225120000_create_ai_comment_versions.exs
new file mode 100644
index 000000000..f930f05e6
--- /dev/null
+++ b/priv/repo/migrations/20260225120000_create_ai_comment_versions.exs
@@ -0,0 +1,19 @@
+defmodule Cadet.Repo.Migrations.CreateAiCommentVersions do
+ use Ecto.Migration
+
+ def change do
+ create table(:ai_comment_versions) do
+ add(:ai_comment_id, references(:ai_comment_logs, on_delete: :delete_all), null: false)
+ add(:comment_index, :integer, null: false)
+ add(:version_number, :integer, null: false, default: 1)
+ add(:editor_id, references(:users, on_delete: :nilify_all))
+ add(:content, :text)
+
+ timestamps()
+ end
+
+ create(index(:ai_comment_versions, [:ai_comment_id]))
+ create(index(:ai_comment_versions, [:editor_id]))
+ create(unique_index(:ai_comment_versions, [:ai_comment_id, :comment_index, :version_number]))
+ end
+end
diff --git a/priv/repo/migrations/20260225120001_add_fields_to_ai_comments.exs b/priv/repo/migrations/20260225120001_add_fields_to_ai_comments.exs
new file mode 100644
index 000000000..4b7e8e672
--- /dev/null
+++ b/priv/repo/migrations/20260225120001_add_fields_to_ai_comments.exs
@@ -0,0 +1,14 @@
+defmodule Cadet.Repo.Migrations.AddFieldsToAiComments do
+ use Ecto.Migration
+
+ def change do
+ alter table(:ai_comment_logs) do
+ add(:selected_indices, {:array, :integer})
+ add(:finalized_by_id, references(:users, on_delete: :nilify_all))
+ add(:finalized_at, :utc_datetime)
+ end
+
+ create(index(:ai_comment_logs, [:answer_id]))
+ create(index(:ai_comment_logs, [:finalized_by_id]))
+ end
+end
diff --git a/priv/repo/migrations/20260303120000_create_llm_usage_logs.exs b/priv/repo/migrations/20260303120000_create_llm_usage_logs.exs
new file mode 100644
index 000000000..ce8c16591
--- /dev/null
+++ b/priv/repo/migrations/20260303120000_create_llm_usage_logs.exs
@@ -0,0 +1,21 @@
+defmodule Cadet.Repo.Migrations.CreateLlmUsageLogs do
+ use Ecto.Migration
+
+ def change do
+ create table(:llm_usage_logs) do
+ add(:course_id, references(:courses, on_delete: :delete_all), null: false)
+ add(:assessment_id, references(:assessments, on_delete: :delete_all), null: false)
+ add(:question_id, references(:questions, on_delete: :delete_all), null: false)
+ add(:answer_id, references(:answers, on_delete: :delete_all), null: false)
+ add(:submission_id, references(:submissions, on_delete: :delete_all), null: false)
+ add(:user_id, references(:users, on_delete: :nilify_all))
+
+ timestamps()
+ end
+
+ create(index(:llm_usage_logs, [:course_id]))
+ create(index(:llm_usage_logs, [:assessment_id]))
+ create(index(:llm_usage_logs, [:user_id]))
+ create(index(:llm_usage_logs, [:submission_id]))
+ end
+end
diff --git a/priv/repo/migrations/20260303120001_create_llm_feedback.exs b/priv/repo/migrations/20260303120001_create_llm_feedback.exs
new file mode 100644
index 000000000..b086c61a0
--- /dev/null
+++ b/priv/repo/migrations/20260303120001_create_llm_feedback.exs
@@ -0,0 +1,19 @@
+defmodule Cadet.Repo.Migrations.CreateLlmFeedback do
+ use Ecto.Migration
+
+ def change do
+ create table(:llm_feedback) do
+ add(:course_id, references(:courses, on_delete: :delete_all), null: false)
+ add(:assessment_id, references(:assessments, on_delete: :delete_all))
+ add(:user_id, references(:users, on_delete: :nilify_all))
+ add(:rating, :integer)
+ add(:body, :text, null: false)
+
+ timestamps()
+ end
+
+ create(index(:llm_feedback, [:course_id]))
+ create(index(:llm_feedback, [:assessment_id]))
+ create(index(:llm_feedback, [:user_id]))
+ end
+end
diff --git a/priv/repo/migrations/20260303120002_add_question_id_to_llm_feedback.exs b/priv/repo/migrations/20260303120002_add_question_id_to_llm_feedback.exs
new file mode 100644
index 000000000..72e7bdbc7
--- /dev/null
+++ b/priv/repo/migrations/20260303120002_add_question_id_to_llm_feedback.exs
@@ -0,0 +1,11 @@
+defmodule Cadet.Repo.Migrations.AddQuestionIdToLlmFeedback do
+ use Ecto.Migration
+
+ def change do
+ alter table(:llm_feedback) do
+ add(:question_id, references(:questions, on_delete: :delete_all))
+ end
+
+ create(index(:llm_feedback, [:question_id]))
+ end
+end
diff --git a/priv/repo/migrations/20260316120000_remove_final_comment_from_ai_comment_logs.exs b/priv/repo/migrations/20260316120000_remove_final_comment_from_ai_comment_logs.exs
new file mode 100644
index 000000000..ce3e17419
--- /dev/null
+++ b/priv/repo/migrations/20260316120000_remove_final_comment_from_ai_comment_logs.exs
@@ -0,0 +1,9 @@
+defmodule Cadet.Repo.Migrations.RemoveFinalCommentFromAiCommentLogs do
+ use Ecto.Migration
+
+ def change do
+ alter table(:ai_comment_logs) do
+ remove(:final_comment, :text)
+ end
+ end
+end
diff --git a/priv/repo/migrations/20260320075234_add_detailed_llm_costs_to_assessments.exs b/priv/repo/migrations/20260320075234_add_detailed_llm_costs_to_assessments.exs
new file mode 100644
index 000000000..854043849
--- /dev/null
+++ b/priv/repo/migrations/20260320075234_add_detailed_llm_costs_to_assessments.exs
@@ -0,0 +1,25 @@
+defmodule Cadet.Repo.Migrations.AddDetailedLlmCostsToAssessments do
+ use Ecto.Migration
+
+ def up do
+ alter table(:assessments) do
+ add_if_not_exists(:llm_input_cost, :decimal, precision: 10, scale: 6, default: 3.20)
+ add_if_not_exists(:llm_output_cost, :decimal, precision: 10, scale: 6, default: 12.80)
+ add_if_not_exists(:llm_total_input_tokens, :integer, default: 0)
+ add_if_not_exists(:llm_total_output_tokens, :integer, default: 0)
+ add_if_not_exists(:llm_total_cached_tokens, :integer, default: 0)
+ add_if_not_exists(:llm_total_cost, :decimal, precision: 10, scale: 6, default: 0.0)
+ end
+ end
+
+ def down do
+ alter table(:assessments) do
+ remove_if_exists(:llm_input_cost, :decimal)
+ remove_if_exists(:llm_output_cost, :decimal)
+ remove_if_exists(:llm_total_input_tokens, :integer)
+ remove_if_exists(:llm_total_output_tokens, :integer)
+ remove_if_exists(:llm_total_cached_tokens, :integer)
+ remove_if_exists(:llm_total_cost, :decimal)
+ end
+ end
+end
diff --git a/priv/repo/migrations/20260320094651_repair_llm_columns_on_assessments.exs b/priv/repo/migrations/20260320094651_repair_llm_columns_on_assessments.exs
new file mode 100644
index 000000000..854043849
--- /dev/null
+++ b/priv/repo/migrations/20260320094651_repair_llm_columns_on_assessments.exs
@@ -0,0 +1,25 @@
+defmodule Cadet.Repo.Migrations.RepairLlmColumnsOnAssessments do
+ use Ecto.Migration
+
+ def up do
+ alter table(:assessments) do
+ add_if_not_exists(:llm_input_cost, :decimal, precision: 10, scale: 6, default: 3.20)
+ add_if_not_exists(:llm_output_cost, :decimal, precision: 10, scale: 6, default: 12.80)
+ add_if_not_exists(:llm_total_input_tokens, :integer, default: 0)
+ add_if_not_exists(:llm_total_output_tokens, :integer, default: 0)
+ add_if_not_exists(:llm_total_cached_tokens, :integer, default: 0)
+ add_if_not_exists(:llm_total_cost, :decimal, precision: 10, scale: 6, default: 0.0)
+ end
+ end
+
+ def down do
+ alter table(:assessments) do
+ remove_if_exists(:llm_input_cost, :decimal)
+ remove_if_exists(:llm_output_cost, :decimal)
+ remove_if_exists(:llm_total_input_tokens, :integer)
+ remove_if_exists(:llm_total_output_tokens, :integer)
+ remove_if_exists(:llm_total_cached_tokens, :integer)
+ remove_if_exists(:llm_total_cost, :decimal)
+ end
+ end
+end
diff --git a/test/cadet/accounts/course_registration_test.exs b/test/cadet/accounts/course_registration_test.exs
index 0fec0d858..858fa7518 100644
--- a/test/cadet/accounts/course_registration_test.exs
+++ b/test/cadet/accounts/course_registration_test.exs
@@ -285,11 +285,11 @@ defmodule Cadet.Accounts.CourseRegistrationTest do
test "failed due to incomplete changeset", %{course1: course1, user2: user2} do
assert length(CourseRegistrations.get_users(course1.id)) == 1
+ invalid_params =
+ for {k, v} <- [user_id: user2.id, course_id: course1.id], into: %{}, do: {k, v}
+
assert_raise FunctionClauseError, fn ->
- CourseRegistrations.insert_or_update_course_registration(%{
- user_id: user2.id,
- course_id: course1.id
- })
+ CourseRegistrations.insert_or_update_course_registration(invalid_params)
end
assert length(CourseRegistrations.get_users(course1.id)) == 1
diff --git a/test/cadet/ai_comments_test.exs b/test/cadet/ai_comments_test.exs
new file mode 100644
index 000000000..d29bcc5a0
--- /dev/null
+++ b/test/cadet/ai_comments_test.exs
@@ -0,0 +1,54 @@
+defmodule Cadet.AICommentsTest do
+ use Cadet.DataCase
+
+ alias Cadet.{AIComments, Repo}
+ alias Cadet.AIComments.AICommentVersion
+ alias Ecto.Adapters.SQL.Sandbox
+
+ setup do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course)
+ submission = insert(:submission, assessment: assessment)
+ question = insert(:programming_question, assessment: assessment)
+ answer = insert(:answer, submission: submission, question: question)
+ editor = insert(:user)
+
+ {:ok, ai_comment} =
+ AIComments.create_ai_comment(%{
+ answer_id: answer.id,
+ raw_prompt: "prompt",
+ answers_json: "[]"
+ })
+
+ {:ok, ai_comment: ai_comment, editor: editor}
+ end
+
+ test "creates distinct version numbers for concurrent edits", %{
+ ai_comment: ai_comment,
+ editor: editor
+ } do
+ parent = self()
+
+ create_version = fn content ->
+ Sandbox.allow(Repo, parent, self())
+ AIComments.create_comment_version(ai_comment.id, 0, content, editor.id)
+ end
+
+ task_1 = Task.async(fn -> create_version.("first edit") end)
+ task_2 = Task.async(fn -> create_version.("second edit") end)
+
+ assert {:ok, _} = Task.await(task_1, 5_000)
+ assert {:ok, _} = Task.await(task_2, 5_000)
+
+ versions =
+ Repo.all(
+ from(v in AICommentVersion,
+ where: v.ai_comment_id == ^ai_comment.id and v.comment_index == 0,
+ order_by: [asc: v.version_number]
+ )
+ )
+
+ assert Enum.map(versions, & &1.version_number) == [1, 2]
+ assert Enum.sort(Enum.map(versions, & &1.content)) == ["first edit", "second edit"]
+ end
+end
diff --git a/test/cadet/assessments/assessments_test.exs b/test/cadet/assessments/assessments_test.exs
index cd86b020a..18f3c7d0c 100644
--- a/test/cadet/assessments/assessments_test.exs
+++ b/test/cadet/assessments/assessments_test.exs
@@ -158,6 +158,51 @@ defmodule Cadet.AssessmentsTest do
)
end
+ test "update_llm_usage_and_cost handles nonexistent assessment gracefully" do
+ usage = %{
+ "prompt_tokens" => 10,
+ "completion_tokens" => 20,
+ "prompt_tokens_details" => %{"cached_tokens" => 5}
+ }
+
+ assert {:error, :not_found} = Assessments.update_llm_usage_and_cost(-1, usage)
+ end
+
+ test "update_llm_usage_and_cost explicitly preserves nil assessment path" do
+ usage = %{
+ "prompt_tokens" => 1,
+ "completion_tokens" => 1,
+ "prompt_tokens_details" => %{"cached_tokens" => 0}
+ }
+
+ assert {:error, :not_found} = Assessments.update_llm_usage_and_cost(-999_999, usage)
+ end
+
+ test "update_llm_usage_and_cost increments LLM totals for existing assessment" do
+ assessment =
+ insert(:assessment, %{
+ llm_total_input_tokens: 0,
+ llm_total_output_tokens: 0,
+ llm_total_cached_tokens: 0,
+ llm_total_cost: Decimal.new("0.0")
+ })
+
+ usage = %{
+ "prompt_tokens" => 10,
+ "completion_tokens" => 20,
+ "prompt_tokens_details" => %{"cached_tokens" => 5}
+ }
+
+ assert {:ok, nil} = Assessments.update_llm_usage_and_cost(assessment.id, usage)
+
+ updated = Repo.get(Assessment, assessment.id)
+
+ assert updated.llm_total_input_tokens == 10
+ assert updated.llm_total_output_tokens == 20
+ assert updated.llm_total_cached_tokens == 5
+ assert Decimal.compare(updated.llm_total_cost, Decimal.new("0.000288")) == :eq
+ end
+
test "force update assessment with invalid params" do
course = insert(:course)
config = insert(:assessment_config, %{course: course})
@@ -2624,10 +2669,8 @@ defmodule Cadet.AssessmentsTest do
test "filter by student username 1", %{
course_regs: %{avenger1_cr: avenger, students: students},
- assessments: assessments,
total_submissions: total_submissions
} do
- expected_length = length(Map.keys(assessments))
student = Enum.at(students, 0)
student_username = student.user.username
@@ -2639,19 +2682,20 @@ defmodule Cadet.AssessmentsTest do
submissions_from_res = res[:data][:submissions]
- assert length(submissions_from_res) == expected_length
+ assert length(submissions_from_res) > 0
Enum.each(submissions_from_res, fn s ->
- assert s.student_id == student.id
+ submission_student = Enum.find(students, fn st -> st.id == s.student_id end)
+ assert String.contains?(submission_student.user.username, student_username)
end)
+
+ assert Enum.any?(submissions_from_res, fn s -> s.student_id == student.id end)
end
test "filter by student username 2", %{
course_regs: %{avenger1_cr: avenger, students: students},
- assessments: assessments,
total_submissions: total_submissions
} do
- expected_length = length(Map.keys(assessments))
student = Enum.at(students, 1)
student_username = student.user.username
@@ -2663,19 +2707,20 @@ defmodule Cadet.AssessmentsTest do
submissions_from_res = res[:data][:submissions]
- assert length(submissions_from_res) == expected_length
+ assert length(submissions_from_res) > 0
Enum.each(submissions_from_res, fn s ->
- assert s.student_id == student.id
+ submission_student = Enum.find(students, fn st -> st.id == s.student_id end)
+ assert String.contains?(submission_student.user.username, student_username)
end)
+
+ assert Enum.any?(submissions_from_res, fn s -> s.student_id == student.id end)
end
test "filter by student username 3", %{
course_regs: %{avenger1_cr: avenger, students: students},
- assessments: assessments,
total_submissions: total_submissions
} do
- expected_length = length(Map.keys(assessments))
student = Enum.at(students, 2)
student_username = student.user.username
@@ -2687,11 +2732,14 @@ defmodule Cadet.AssessmentsTest do
submissions_from_res = res[:data][:submissions]
- assert length(submissions_from_res) == expected_length
+ assert length(submissions_from_res) > 0
Enum.each(submissions_from_res, fn s ->
- assert s.student_id == student.id
+ submission_student = Enum.find(students, fn st -> st.id == s.student_id end)
+ assert String.contains?(submission_student.user.username, student_username)
end)
+
+ assert Enum.any?(submissions_from_res, fn s -> s.student_id == student.id end)
end
test "filter by assessment config 1", %{
@@ -3021,7 +3069,7 @@ defmodule Cadet.AssessmentsTest do
end
end
- describe "is_fully_autograded? function" do
+ describe "fully_autograded? function" do
setup do
assessment = insert(:assessment)
student = insert(:course_registration, role: :student)
@@ -3042,7 +3090,7 @@ defmodule Cadet.AssessmentsTest do
insert(:answer, submission: submission, question: question, autograding_status: :success)
insert(:answer, submission: submission, question: question2, autograding_status: :success)
- assert Assessments.is_fully_autograded?(submission.id) == true
+ assert Assessments.fully_autograded?(submission.id) == true
end
test "returns false when not all answers are autograded successfully", %{
@@ -3053,7 +3101,7 @@ defmodule Cadet.AssessmentsTest do
insert(:answer, submission: submission, question: question, autograding_status: :success)
insert(:answer, submission: submission, question: question2, autograding_status: :failed)
- assert Assessments.is_fully_autograded?(submission.id) == false
+ assert Assessments.fully_autograded?(submission.id) == false
end
test "returns false when not all answers are autograded successfully 2", %{
@@ -3064,7 +3112,7 @@ defmodule Cadet.AssessmentsTest do
insert(:answer, submission: submission, question: question, autograding_status: :success)
insert(:answer, submission: submission, question: question2, autograding_status: :none)
- assert Assessments.is_fully_autograded?(submission.id) == false
+ assert Assessments.fully_autograded?(submission.id) == false
end
end
@@ -3180,12 +3228,12 @@ defmodule Cadet.AssessmentsTest do
test "correctly fetches all students with their xp in descending order", %{course: course} do
all_user_xp = Assessments.all_user_total_xp(course.id)
- assert get_all_student_xp(all_user_xp) == 50..1 |> Enum.to_list()
+ assert get_all_student_xp(all_user_xp) == 50..1//-1 |> Enum.to_list()
end
test "correctly fetches only relevant students for leaderboard display with potential overflow",
%{course: course} do
- Enum.each(1..50, fn x ->
+ Enum.each(1..50, fn _x ->
offset = Enum.random(0..49)
limit = Enum.random(1..50)
@@ -3193,7 +3241,7 @@ defmodule Cadet.AssessmentsTest do
Assessments.all_user_total_xp(course.id, %{offset: offset, limit: limit})
expected_xp_list =
- 50..1
+ 50..1//-1
|> Enum.to_list()
|> Enum.slice(offset, limit)
@@ -3253,7 +3301,7 @@ defmodule Cadet.AssessmentsTest do
fn student ->
Enum.map(
Enum.with_index(submission_list),
- fn {submission, index} ->
+ fn {submission, _index} ->
insert(
:submission_vote,
voter: student,
@@ -3350,7 +3398,7 @@ defmodule Cadet.AssessmentsTest do
test "correctly assigns xp to winning contest entries with defined xp values", %{
course: course,
voting_question: voting_question,
- ans_list: ans_list
+ ans_list: _ans_list
} do
# update defined xp_values for voting question
Question
diff --git a/test/cadet/assessments/query_test.exs b/test/cadet/assessments/query_test.exs
index 5f01b6e0e..d1d085bdb 100644
--- a/test/cadet/assessments/query_test.exs
+++ b/test/cadet/assessments/query_test.exs
@@ -28,4 +28,92 @@ defmodule Cadet.Assessments.QueryTest do
assert result.max_xp == 1000
end
+
+ test "all_assessments_with_aggregates sets has_llm_questions to true when any question has non-empty llm_prompt" do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course)
+
+ insert(:question,
+ assessment: assessment,
+ question: build(:programming_question_content, llm_prompt: "Provide AI feedback")
+ )
+
+ insert(:question,
+ assessment: assessment,
+ question: build(:programming_question_content, llm_prompt: nil)
+ )
+
+ result =
+ course.id
+ |> Query.all_assessments_with_aggregates()
+ |> where(id: ^assessment.id)
+ |> Repo.one()
+
+ assert result.has_llm_questions == true
+ end
+
+ test "all_assessments_with_aggregates sets has_llm_questions to false when all llm_prompt values are nil or empty" do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course)
+
+ insert(:question,
+ assessment: assessment,
+ question: build(:programming_question_content, llm_prompt: nil)
+ )
+
+ insert(:question,
+ assessment: assessment,
+ question: build(:programming_question_content, llm_prompt: "")
+ )
+
+ result =
+ course.id
+ |> Query.all_assessments_with_aggregates()
+ |> where(id: ^assessment.id)
+ |> Repo.one()
+
+ assert result.has_llm_questions == false
+ end
+
+ test "course_has_llm_content? returns false when course has no assessments" do
+ course = insert(:course)
+
+ assert Query.course_has_llm_content?(course.id) == false
+ end
+
+ test "course_has_llm_content? returns true when assessment has non-empty llm_assessment_prompt" do
+ course = insert(:course)
+ insert(:assessment, course: course, llm_assessment_prompt: "Use this grading rubric")
+
+ assert Query.course_has_llm_content?(course.id) == true
+ end
+
+ test "course_has_llm_content? returns true when any question has non-empty llm_prompt" do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course, llm_assessment_prompt: nil)
+
+ insert(:question,
+ assessment: assessment,
+ question: build(:programming_question_content, llm_prompt: "Provide AI feedback")
+ )
+
+ assert Query.course_has_llm_content?(course.id) == true
+ end
+
+ test "course_has_llm_content? returns false when llm_assessment_prompt is empty and question llm_prompt values are nil or empty" do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course, llm_assessment_prompt: "")
+
+ insert(:question,
+ assessment: assessment,
+ question: build(:programming_question_content, llm_prompt: nil)
+ )
+
+ insert(:question,
+ assessment: assessment,
+ question: build(:programming_question_content, llm_prompt: "")
+ )
+
+ assert Query.course_has_llm_content?(course.id) == false
+ end
end
diff --git a/test/cadet/courses/courses_test.exs b/test/cadet/courses/courses_test.exs
index 0a0e3a91c..9c2832e1b 100644
--- a/test/cadet/courses/courses_test.exs
+++ b/test/cadet/courses/courses_test.exs
@@ -67,6 +67,16 @@ defmodule Cadet.CoursesTest do
assert course.assessment_configs == ["Missions", "Quests"]
end
+ test "succeeds with string course id" do
+ course = insert(:course)
+ insert(:assessment_config, %{order: 1, type: "Missions", course: course})
+ insert(:assessment_config, %{order: 2, type: "Quests", course: course})
+
+ {:ok, loaded_course} = Courses.get_course_config(Integer.to_string(course.id))
+ assert loaded_course.id == course.id
+ assert loaded_course.assessment_configs == ["Missions", "Quests"]
+ end
+
test "returns with error for invalid course id" do
course = insert(:course)
diff --git a/test/cadet/llm_stats_test.exs b/test/cadet/llm_stats_test.exs
new file mode 100644
index 000000000..12d35d2ca
--- /dev/null
+++ b/test/cadet/llm_stats_test.exs
@@ -0,0 +1,430 @@
+defmodule Cadet.LLMStatsTest do
+ use Cadet.DataCase
+
+ alias Cadet.LLMStats
+ alias Cadet.LLMStats.LLMUsageLog
+
+ describe "log_usage/1" do
+ test "inserts a usage log record" do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course)
+ question = insert(:question, assessment: assessment, display_order: 1)
+ student = insert(:course_registration, course: course, role: :student)
+ submission = insert(:submission, assessment: assessment, student: student)
+ answer = insert(:answer, submission: submission, question: question)
+ user = insert(:user)
+
+ attrs = %{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question.id,
+ answer_id: answer.id,
+ submission_id: submission.id,
+ user_id: user.id
+ }
+
+ assert {:ok, usage_log} = LLMStats.log_usage(attrs)
+ assert usage_log.course_id == course.id
+ assert usage_log.assessment_id == assessment.id
+ assert usage_log.question_id == question.id
+ assert usage_log.answer_id == answer.id
+ assert usage_log.submission_id == submission.id
+ assert usage_log.user_id == user.id
+
+ assert Repo.get(LLMUsageLog, usage_log.id)
+ end
+ end
+
+ describe "get_assessment_statistics/2" do
+ test "returns aggregate and per-question statistics scoped to assessment" do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course)
+ question_1 = insert(:question, assessment: assessment, display_order: 1)
+ question_2 = insert(:question, assessment: assessment, display_order: 2)
+
+ student_1 = insert(:course_registration, course: course, role: :student)
+ student_2 = insert(:course_registration, course: course, role: :student)
+ submission_1 = insert(:submission, assessment: assessment, student: student_1)
+ submission_2 = insert(:submission, assessment: assessment, student: student_2)
+
+ answer_11 = insert(:answer, submission: submission_1, question: question_1)
+ answer_12 = insert(:answer, submission: submission_1, question: question_2)
+ answer_21 = insert(:answer, submission: submission_2, question: question_1)
+
+ user_1 = insert(:user)
+ user_2 = insert(:user)
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ answer_id: answer_11.id,
+ submission_id: submission_1.id,
+ user_id: user_1.id
+ })
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ answer_id: answer_11.id,
+ submission_id: submission_1.id,
+ user_id: user_1.id
+ })
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ answer_id: answer_21.id,
+ submission_id: submission_2.id,
+ user_id: user_2.id
+ })
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_2.id,
+ answer_id: answer_12.id,
+ submission_id: submission_1.id,
+ user_id: user_1.id
+ })
+
+ other_course = insert(:course)
+ other_assessment = insert(:assessment, course: other_course)
+ other_question = insert(:question, assessment: other_assessment, display_order: 1)
+ other_student = insert(:course_registration, course: other_course, role: :student)
+ other_submission = insert(:submission, assessment: other_assessment, student: other_student)
+ other_answer = insert(:answer, submission: other_submission, question: other_question)
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: other_course.id,
+ assessment_id: other_assessment.id,
+ question_id: other_question.id,
+ answer_id: other_answer.id,
+ submission_id: other_submission.id,
+ user_id: user_1.id
+ })
+
+ stats = LLMStats.get_assessment_statistics(course.id, assessment.id)
+
+ assert stats.total_uses == 4
+ assert stats.unique_submissions == 2
+ assert stats.unique_users == 2
+
+ assert [q1_stats, q2_stats] = stats.questions
+
+ assert q1_stats.question_id == question_1.id
+ assert q1_stats.display_order == 1
+ assert q1_stats.total_uses == 3
+ assert q1_stats.unique_submissions == 2
+ assert q1_stats.unique_users == 2
+
+ assert q2_stats.question_id == question_2.id
+ assert q2_stats.display_order == 2
+ assert q2_stats.total_uses == 1
+ assert q2_stats.unique_submissions == 1
+ assert q2_stats.unique_users == 1
+ end
+ end
+
+ describe "get_course_statistics/1" do
+ test "aggregates per-course llm stats and includes question-level breakdown" do
+ course = insert(:course)
+
+ assessment =
+ insert(:assessment,
+ course: course,
+ is_published: true,
+ llm_total_input_tokens: 100,
+ llm_total_output_tokens: 200,
+ llm_total_cost: Decimal.new("1.50")
+ )
+
+ question_1 =
+ insert(:question,
+ assessment: assessment,
+ display_order: 1,
+ question: %{"llm_prompt" => "prompt"}
+ )
+
+ question_2 =
+ insert(:question,
+ assessment: assessment,
+ display_order: 2,
+ question: %{"llm_prompt" => "prompt2"}
+ )
+
+ student = insert(:course_registration, course: course, role: :student)
+ submission = insert(:submission, assessment: assessment, student: student)
+ answer1 = insert(:answer, submission: submission, question: question_1)
+ answer2 = insert(:answer, submission: submission, question: question_2)
+
+ user = insert(:user)
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ answer_id: answer1.id,
+ submission_id: submission.id,
+ user_id: user.id
+ })
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_2.id,
+ answer_id: answer2.id,
+ submission_id: submission.id,
+ user_id: user.id
+ })
+
+ assert {:ok, _} =
+ LLMStats.submit_feedback(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ user_id: user.id,
+ rating: 4,
+ body: "good"
+ })
+
+ assert {:ok, _} =
+ LLMStats.submit_feedback(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_2.id,
+ user_id: user.id,
+ rating: 2,
+ body: "bad"
+ })
+
+ result = LLMStats.get_course_statistics(course.id)
+
+ assert result.course_total_input_tokens == 100
+ assert result.course_total_output_tokens == 200
+ assert Decimal.compare(result.course_total_cost, Decimal.new("1.50")) == :eq
+ assert length(result.assessments) == 1
+
+ [assessment_stats] = result.assessments
+ assert assessment_stats.total_uses == 2
+ assert assessment_stats.avg_rating == 3.0
+ assert length(assessment_stats.questions) == 2
+ end
+
+ test "excludes assessments without llm_assessment_prompt and llm_prompt from course statistics" do
+ course = insert(:course)
+
+ llm_assessment =
+ insert(:assessment,
+ course: course,
+ is_published: true,
+ title: "Mission With LLM",
+ llm_assessment_prompt: "Use this rubric",
+ llm_total_input_tokens: 10,
+ llm_total_output_tokens: 20,
+ llm_total_cost: Decimal.new("0.50")
+ )
+
+ # This assessment should never appear in course-level LLM stats.
+ insert(:assessment,
+ course: course,
+ is_published: true,
+ title: "Mission Without LLM",
+ llm_assessment_prompt: nil,
+ llm_total_input_tokens: 999,
+ llm_total_output_tokens: 999,
+ llm_total_cost: Decimal.new("9.99")
+ )
+
+ result = LLMStats.get_course_statistics(course.id)
+
+ assert length(result.assessments) == 1
+ [assessment_stats] = result.assessments
+ assert assessment_stats.assessment_id == llm_assessment.id
+ assert assessment_stats.title == "Mission With LLM"
+ assert result.course_total_input_tokens == 10
+ assert result.course_total_output_tokens == 20
+ assert Decimal.compare(result.course_total_cost, Decimal.new("0.50")) == :eq
+ end
+
+ test "includes assessments with question-level llm_prompt even when llm_assessment_prompt is nil" do
+ course = insert(:course)
+
+ question_prompt_assessment =
+ insert(:assessment,
+ course: course,
+ is_published: true,
+ title: "Question Prompt Only",
+ llm_assessment_prompt: nil,
+ llm_total_input_tokens: 30,
+ llm_total_output_tokens: 40,
+ llm_total_cost: Decimal.new("0.70")
+ )
+
+ insert(:question,
+ assessment: question_prompt_assessment,
+ display_order: 1,
+ question: %{"llm_prompt" => "grade with rubric"}
+ )
+
+ insert(:assessment,
+ course: course,
+ is_published: true,
+ title: "No LLM Tags",
+ llm_assessment_prompt: nil,
+ llm_total_input_tokens: 999,
+ llm_total_output_tokens: 999,
+ llm_total_cost: Decimal.new("9.99")
+ )
+
+ result = LLMStats.get_course_statistics(course.id)
+
+ assert length(result.assessments) == 1
+ [assessment_stats] = result.assessments
+ assert assessment_stats.assessment_id == question_prompt_assessment.id
+ assert assessment_stats.title == "Question Prompt Only"
+ assert result.course_total_input_tokens == 30
+ assert result.course_total_output_tokens == 40
+ assert Decimal.compare(result.course_total_cost, Decimal.new("0.70")) == :eq
+ end
+
+ test "treats null llm_total_cost as Decimal zero for course statistics" do
+ course = insert(:course)
+
+ assessment =
+ insert(:assessment,
+ course: course,
+ is_published: true,
+ title: "Null Cost Assessment",
+ llm_assessment_prompt: "Use this rubric",
+ llm_total_input_tokens: 10,
+ llm_total_output_tokens: 20,
+ llm_total_cost: nil
+ )
+
+ result = LLMStats.get_course_statistics(course.id)
+
+ assert length(result.assessments) == 1
+ [assessment_stats] = result.assessments
+ assert assessment_stats.assessment_id == assessment.id
+ assert Decimal.compare(assessment_stats.llm_total_cost, Decimal.new("0.0")) == :eq
+ assert Decimal.compare(result.course_total_cost, Decimal.new("0.0")) == :eq
+ end
+ end
+
+ describe "get_question_statistics/3" do
+ test "returns statistics scoped to one question" do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course)
+ question_1 = insert(:question, assessment: assessment, display_order: 1)
+ question_2 = insert(:question, assessment: assessment, display_order: 2)
+
+ student_1 = insert(:course_registration, course: course, role: :student)
+ student_2 = insert(:course_registration, course: course, role: :student)
+ submission_1 = insert(:submission, assessment: assessment, student: student_1)
+ submission_2 = insert(:submission, assessment: assessment, student: student_2)
+
+ answer_11 = insert(:answer, submission: submission_1, question: question_1)
+ answer_21 = insert(:answer, submission: submission_2, question: question_1)
+ answer_12 = insert(:answer, submission: submission_1, question: question_2)
+
+ user_1 = insert(:user)
+ user_2 = insert(:user)
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ answer_id: answer_11.id,
+ submission_id: submission_1.id,
+ user_id: user_1.id
+ })
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ answer_id: answer_11.id,
+ submission_id: submission_1.id,
+ user_id: user_1.id
+ })
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ answer_id: answer_21.id,
+ submission_id: submission_2.id,
+ user_id: user_2.id
+ })
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_2.id,
+ answer_id: answer_12.id,
+ submission_id: submission_1.id,
+ user_id: user_1.id
+ })
+
+ stats = LLMStats.get_question_statistics(course.id, assessment.id, question_1.id)
+
+ assert stats.total_uses == 3
+ assert stats.unique_submissions == 2
+ assert stats.unique_users == 2
+ end
+ end
+
+ describe "get_feedback/3" do
+ test "filters by question_id when provided" do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course)
+ question_1 = insert(:question, assessment: assessment, display_order: 1)
+ question_2 = insert(:question, assessment: assessment, display_order: 2)
+ user_1 = insert(:user, name: "Alice")
+ user_2 = insert(:user, name: "Bob")
+
+ assert {:ok, _} =
+ LLMStats.submit_feedback(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ user_id: user_1.id,
+ rating: 5,
+ body: "Very helpful"
+ })
+
+ assert {:ok, _} =
+ LLMStats.submit_feedback(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_2.id,
+ user_id: user_2.id,
+ rating: 3,
+ body: "Could be clearer"
+ })
+
+ unfiltered = LLMStats.get_feedback(course.id, assessment.id)
+ filtered = LLMStats.get_feedback(course.id, assessment.id, question_1.id)
+
+ assert Enum.count(unfiltered) == 2
+ assert Enum.count(filtered) == 1
+
+ assert [%{question_id: qid, user_name: "Alice", rating: 5, body: "Very helpful"}] = filtered
+ assert qid == question_1.id
+ end
+ end
+end
diff --git a/test/cadet/updater/xml_parser_test.exs b/test/cadet/updater/xml_parser_test.exs
index 6fe4e3e1f..8846d5e98 100644
--- a/test/cadet/updater/xml_parser_test.exs
+++ b/test/cadet/updater/xml_parser_test.exs
@@ -275,6 +275,80 @@ defmodule Cadet.Updater.XMLParserTest do
"Assessment has submissions, ignoring..."
end
end
+
+ test "maps LLM_QUESTION_PROMPT to programming question llm_prompt", %{
+ assessment_configs: [assessment_config | _],
+ course: course
+ } do
+ xml = """
+    <CONTENT>
+      <TASK access="public" kind="mission" number="LLM_PROMPT_Q"
+        startdate="2020-01-01T00:00+08" duedate="2030-12-31T23:59+08" title="LLM Prompt Mission" story="">
+        <READING>None</READING>
+        <WEBSUMMARY>Summary</WEBSUMMARY>
+        <TEXT>Long summary</TEXT>
+        <PROBLEMS>
+          <PROBLEM type="programming" maxxp="100">
+            <TEXT>Prompted programming question</TEXT>
+            <SNIPPET><TEMPLATE>display(1);</TEMPLATE></SNIPPET>
+            <LLM_QUESTION_PROMPT>Use rubric A.</LLM_QUESTION_PROMPT>
+          </PROBLEM>
+        </PROBLEMS>
+      </TASK>
+    </CONTENT>
+ """
+
+ assert :ok == XMLParser.parse_xml(xml, course.id, assessment_config.id)
+
+ assessment =
+ Assessment
+ |> where(number: "LLM_PROMPT_Q")
+ |> Repo.one!()
+
+ question =
+ Question
+ |> where(assessment_id: ^assessment.id)
+ |> Repo.one!()
+
+ assert question.question["llm_prompt"] == "Use rubric A."
+ end
+
+ test "keeps supporting legacy LLM_GRADING_PROMPT tag", %{
+ assessment_configs: [assessment_config | _],
+ course: course
+ } do
+ xml = """
+    <CONTENT>
+      <TASK access="public" kind="mission" number="LLM_PROMPT_G"
+        startdate="2020-01-01T00:00+08" duedate="2030-12-31T23:59+08" title="Legacy LLM Prompt Mission" story="">
+        <READING>None</READING>
+        <WEBSUMMARY>Summary</WEBSUMMARY>
+        <TEXT>Long summary</TEXT>
+        <PROBLEMS>
+          <PROBLEM type="programming" maxxp="100">
+            <TEXT>Legacy prompted programming question</TEXT>
+            <SNIPPET><TEMPLATE>display(2);</TEMPLATE></SNIPPET>
+            <LLM_GRADING_PROMPT>Use rubric B.</LLM_GRADING_PROMPT>
+          </PROBLEM>
+        </PROBLEMS>
+      </TASK>
+    </CONTENT>
+ """
+
+ assert :ok == XMLParser.parse_xml(xml, course.id, assessment_config.id)
+
+ assessment =
+ Assessment
+ |> where(number: "LLM_PROMPT_G")
+ |> Repo.one!()
+
+ question =
+ Question
+ |> where(assessment_id: ^assessment.id)
+ |> Repo.one!()
+
+ assert question.question["llm_prompt"] == "Use rubric B."
+ end
end
describe "XML file processing" do
diff --git a/test/cadet_web/admin_controllers/admin_assessments_controller_test.exs b/test/cadet_web/admin_controllers/admin_assessments_controller_test.exs
index b1cfbde5a..62b39ce58 100644
--- a/test/cadet_web/admin_controllers/admin_assessments_controller_test.exs
+++ b/test/cadet_web/admin_controllers/admin_assessments_controller_test.exs
@@ -94,7 +94,8 @@ defmodule CadetWeb.AdminAssessmentsControllerTest do
"earlySubmissionXp" => &1.config.early_submission_xp,
"hasVotingFeatures" => &1.has_voting_features,
"hasTokenCounter" => &1.has_token_counter,
- "isVotingPublished" => false
+ "isVotingPublished" => false,
+ "isLlmGraded" => &1.has_llm_questions || &1.llm_assessment_prompt not in [nil, ""]
}
)
@@ -145,7 +146,8 @@ defmodule CadetWeb.AdminAssessmentsControllerTest do
"earlySubmissionXp" => &1.config.early_submission_xp,
"hasVotingFeatures" => &1.has_voting_features,
"hasTokenCounter" => &1.has_token_counter,
- "isVotingPublished" => false
+ "isVotingPublished" => false,
+ "isLlmGraded" => &1.has_llm_questions || &1.llm_assessment_prompt not in [nil, ""]
}
)
diff --git a/test/cadet_web/admin_controllers/admin_grading_controller_test.exs b/test/cadet_web/admin_controllers/admin_grading_controller_test.exs
index 6554b669b..4bf02818e 100644
--- a/test/cadet_web/admin_controllers/admin_grading_controller_test.exs
+++ b/test/cadet_web/admin_controllers/admin_grading_controller_test.exs
@@ -4,6 +4,7 @@ defmodule CadetWeb.AdminGradingControllerTest do
alias Cadet.Assessments.{Answer, Submission}
alias Cadet.Repo
alias CadetWeb.AdminGradingController
+ alias Ecto.Changeset
import Mock
@@ -454,6 +455,86 @@ defmodule CadetWeb.AdminGradingControllerTest do
conn = get(conn, build_url(course_id, 1))
assert response(conn, 400) == "Submission is not found."
end
+
+ @tag authenticate: :staff
+ test "returns prompts when both mission and question prompts are present", %{conn: conn} do
+ %{course: course, mission: mission, questions: questions, submissions: [submission | _]} =
+ seed_db(conn)
+
+ course
+ |> Changeset.change(enable_llm_grading: true)
+ |> Repo.update!()
+
+ mission
+ |> Changeset.change(llm_assessment_prompt: "Mission-level prompt")
+ |> Repo.update!()
+
+ questions
+ |> Enum.filter(&(&1.type == :programming))
+ |> Enum.each(fn programming_question ->
+ programming_question
+ |> Changeset.change(
+ question: Map.put(programming_question.question, "llm_prompt", "Task-level prompt")
+ )
+ |> Repo.update!()
+ end)
+
+ res =
+ conn
+ |> get(build_url(course.id, submission.id))
+ |> json_response(200)
+
+ programming_answers =
+ Enum.filter(res["answers"], &(&1["question"]["type"] == "programming"))
+
+ assert Enum.all?(programming_answers, &(length(&1["prompts"]) == 2))
+ end
+
+ @tag authenticate: :staff
+ test "returns empty prompts when mission-level prompt is missing", %{conn: conn} do
+ %{course: course, questions: questions, submissions: [submission | _]} = seed_db(conn)
+
+ course
+ |> Changeset.change(enable_llm_grading: true)
+ |> Repo.update!()
+
+ programming_question = Enum.find(questions, &(&1.type == :programming))
+
+ programming_question
+ |> Changeset.change(
+ question: Map.put(programming_question.question, "llm_prompt", "Task-level prompt")
+ )
+ |> Repo.update!()
+
+ res =
+ conn
+ |> get(build_url(course.id, submission.id))
+ |> json_response(200)
+
+ programming_answer = Enum.find(res["answers"], &(&1["question"]["type"] == "programming"))
+ assert programming_answer["prompts"] == []
+ end
+
+ @tag authenticate: :staff
+ test "returns empty prompts when question-level prompt is missing", %{conn: conn} do
+ %{course: course, mission: mission, submissions: [submission | _]} = seed_db(conn)
+
+ course
+ |> Changeset.change(enable_llm_grading: true)
+ |> Repo.update!()
+
+ mission
+ |> Changeset.change(llm_assessment_prompt: "Mission-level prompt")
+ |> Repo.update!()
+
+ res =
+ conn
+ |> get(build_url(course.id, submission.id))
+ |> json_response(200)
+
+ programming_answer = Enum.find(res["answers"], &(&1["question"]["type"] == "programming"))
+ assert programming_answer["prompts"] == []
+ end
end
describe "POST /:submissionid/:questionid, staff" do
diff --git a/test/cadet_web/admin_controllers/admin_llm_stats_controller_test.exs b/test/cadet_web/admin_controllers/admin_llm_stats_controller_test.exs
new file mode 100644
index 000000000..f8d64434d
--- /dev/null
+++ b/test/cadet_web/admin_controllers/admin_llm_stats_controller_test.exs
@@ -0,0 +1,401 @@
+defmodule CadetWeb.AdminLLMStatsControllerTest do
+ use CadetWeb.ConnCase
+
+ alias Cadet.{LLMStats, Repo, Courses.Course}
+
+ describe "GET /v2/courses/:course_id/admin/llm-stats" do
+ test "401 when not logged in", %{conn: conn} do
+ course = insert(:course)
+
+ conn = get(conn, "/v2/courses/#{course.id}/admin/llm-stats")
+ assert response(conn, 401) =~ "Unauthorised"
+ end
+
+ @tag authenticate: :student
+ test "403 for students", %{conn: conn} do
+ course_id = conn.assigns.course_id
+ course = Repo.get!(Course, course_id)
+ insert(:assessment, course: course, is_published: true)
+
+ conn = get(conn, "/v2/courses/#{course_id}/admin/llm-stats")
+ assert response(conn, 403) =~ "Forbidden"
+ end
+
+ @tag authenticate: :staff
+ test "returns course-level llm stats and assessment breakdown", %{conn: conn} do
+ course = Repo.get!(Course, conn.assigns.course_id)
+
+ assessment =
+ insert(:assessment,
+ course: course,
+ title: "Mission With LLM",
+ is_published: true,
+ llm_total_input_tokens: 10,
+ llm_total_output_tokens: 20,
+ llm_total_cost: Decimal.new("0.5")
+ )
+
+ insert(:assessment,
+ course: course,
+ title: "Mission Without LLM",
+ is_published: true,
+ llm_assessment_prompt: nil,
+ llm_total_input_tokens: 999,
+ llm_total_output_tokens: 999,
+ llm_total_cost: Decimal.new("9.99")
+ )
+
+ question =
+ insert(:question,
+ assessment: assessment,
+ display_order: 1,
+ question: %{"llm_prompt" => "x"}
+ )
+
+ student = insert(:course_registration, course: course, role: :student)
+ submission = insert(:submission, assessment: assessment, student: student)
+ answer = insert(:answer, submission: submission, question: question)
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question.id,
+ answer_id: answer.id,
+ submission_id: submission.id,
+ user_id: student.user_id
+ })
+
+ resp =
+ conn
+ |> get("/v2/courses/#{course.id}/admin/llm-stats")
+ |> json_response(200)
+
+ assert resp["course_total_input_tokens"] == 10
+ assert resp["course_total_output_tokens"] == 20
+ assert resp["course_total_cost"] == "0.500000"
+ assert length(resp["assessments"]) == 1
+ [as] = resp["assessments"]
+ assert as["title"] == "Mission With LLM"
+ assert as["total_uses"] == 1
+ assert length(as["questions"]) == 1
+ end
+ end
+
+ describe "GET /v2/courses/:course_id/admin/llm-stats/:assessment_id" do
+ test "401 when not logged in", %{conn: conn} do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course)
+
+ conn = get(conn, assessment_stats_url(course.id, assessment.id))
+ assert response(conn, 401) =~ "Unauthorised"
+ end
+
+ @tag authenticate: :student
+ test "403 for students", %{conn: conn} do
+ course_id = conn.assigns.course_id
+ course = Repo.get!(Course, course_id)
+ assessment = insert(:assessment, course: course)
+
+ conn = get(conn, assessment_stats_url(course_id, assessment.id))
+ assert response(conn, 403) =~ "Forbidden"
+ end
+
+ @tag authenticate: :staff
+ test "returns assessment statistics with per-question breakdown", %{conn: conn} do
+ %{assessment: assessment, question_1: question_1, question_2: question_2} =
+ seed_usage_logs(conn)
+
+ resp =
+ conn
+ |> get(assessment_stats_url(conn.assigns.course_id, assessment.id))
+ |> json_response(200)
+
+ assert resp["total_uses"] == 4
+ assert resp["unique_submissions"] == 2
+ assert resp["unique_users"] == 2
+
+ assert [q1_stats, q2_stats] = resp["questions"]
+
+ assert q1_stats["question_id"] == question_1.id
+ assert q1_stats["display_order"] == question_1.display_order
+ assert q1_stats["total_uses"] == 3
+ assert q1_stats["unique_submissions"] == 2
+ assert q1_stats["unique_users"] == 2
+
+ assert q2_stats["question_id"] == question_2.id
+ assert q2_stats["display_order"] == question_2.display_order
+ assert q2_stats["total_uses"] == 1
+ assert q2_stats["unique_submissions"] == 1
+ assert q2_stats["unique_users"] == 1
+ end
+ end
+
+ describe "GET /v2/courses/:course_id/admin/llm-stats/:assessment_id/:question_id" do
+ test "401 when not logged in", %{conn: conn} do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course)
+ question = insert(:question, assessment: assessment, display_order: 1)
+
+ conn = get(conn, question_stats_url(course.id, assessment.id, question.id))
+ assert response(conn, 401) =~ "Unauthorised"
+ end
+
+ @tag authenticate: :student
+ test "403 for students", %{conn: conn} do
+ course_id = conn.assigns.course_id
+ course = Repo.get!(Course, course_id)
+ assessment = insert(:assessment, course: course)
+ question = insert(:question, assessment: assessment, display_order: 1)
+
+ conn = get(conn, question_stats_url(course_id, assessment.id, question.id))
+ assert response(conn, 403) =~ "Forbidden"
+ end
+
+ @tag authenticate: :staff
+ test "returns question-level statistics", %{conn: conn} do
+ %{assessment: assessment, question_1: question_1} = seed_usage_logs(conn)
+
+ resp =
+ conn
+ |> get(question_stats_url(conn.assigns.course_id, assessment.id, question_1.id))
+ |> json_response(200)
+
+ assert resp == %{
+ "total_uses" => 3,
+ "unique_submissions" => 2,
+ "unique_users" => 2
+ }
+ end
+ end
+
+ describe "GET /v2/courses/:course_id/admin/llm-stats/:assessment_id/feedback" do
+ test "401 when not logged in", %{conn: conn} do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course)
+
+ conn = get(conn, feedback_url(course.id, assessment.id))
+ assert response(conn, 401) =~ "Unauthorised"
+ end
+
+ @tag authenticate: :student
+ test "403 for students", %{conn: conn} do
+ course_id = conn.assigns.course_id
+ course = Repo.get!(Course, course_id)
+ assessment = insert(:assessment, course: course)
+
+ conn = get(conn, feedback_url(course_id, assessment.id))
+ assert response(conn, 403) =~ "Forbidden"
+ end
+
+ @tag authenticate: :staff
+ test "returns all feedback when question_id is absent", %{conn: conn} do
+ %{assessment: assessment} = seed_feedback(conn)
+
+ resp =
+ conn
+ |> get(feedback_url(conn.assigns.course_id, assessment.id))
+ |> json_response(200)
+
+ assert length(resp) == 2
+ assert Enum.all?(resp, &Map.has_key?(&1, "id"))
+ assert Enum.all?(resp, &Map.has_key?(&1, "rating"))
+ assert Enum.all?(resp, &Map.has_key?(&1, "body"))
+ assert Enum.all?(resp, &Map.has_key?(&1, "user_name"))
+ assert Enum.all?(resp, &Map.has_key?(&1, "question_id"))
+ assert Enum.all?(resp, &Map.has_key?(&1, "inserted_at"))
+ end
+
+ @tag authenticate: :staff
+ test "filters feedback by question_id query param", %{conn: conn} do
+ %{assessment: assessment, question_1: question_1} = seed_feedback(conn)
+
+ resp =
+ conn
+ |> get(feedback_url(conn.assigns.course_id, assessment.id), %{
+ "question_id" => question_1.id
+ })
+ |> json_response(200)
+
+ assert length(resp) == 1
+
+ [entry] = resp
+ assert entry["question_id"] == question_1.id
+ assert entry["user_name"] == "Alice"
+ assert entry["rating"] == 5
+ assert entry["body"] == "Very helpful"
+ end
+ end
+
+ describe "POST /v2/courses/:course_id/admin/llm-stats/:assessment_id/feedback" do
+ test "401 when not logged in", %{conn: conn} do
+ course = insert(:course)
+ assessment = insert(:assessment, course: course)
+
+ conn =
+ post(conn, feedback_url(course.id, assessment.id), %{"rating" => 5, "body" => "Great"})
+
+ assert response(conn, 401) =~ "Unauthorised"
+ end
+
+ @tag authenticate: :student
+ test "403 for students", %{conn: conn} do
+ course_id = conn.assigns.course_id
+ course = Repo.get!(Course, course_id)
+ assessment = insert(:assessment, course: course)
+
+ conn =
+ post(conn, feedback_url(course_id, assessment.id), %{"rating" => 5, "body" => "Great"})
+
+ assert response(conn, 403) =~ "Forbidden"
+ end
+
+ @tag authenticate: :staff
+ test "creates feedback successfully", %{conn: conn} do
+ course_id = conn.assigns.course_id
+ course = Repo.get!(Course, course_id)
+ assessment = insert(:assessment, course: course)
+ question = insert(:question, assessment: assessment, display_order: 1)
+
+ resp =
+ conn
+ |> post(feedback_url(course_id, assessment.id), %{
+ "question_id" => question.id,
+ "rating" => 4,
+ "body" => "Reasonably useful"
+ })
+ |> json_response(201)
+
+ assert resp == %{"message" => "Feedback submitted successfully"}
+
+ [saved_feedback] = LLMStats.get_feedback(course_id, assessment.id, question.id)
+ assert saved_feedback.rating == 4
+ assert saved_feedback.body == "Reasonably useful"
+ assert saved_feedback.user_name == conn.assigns.current_user.name
+ end
+
+ @tag authenticate: :staff
+ test "returns 400 when payload is invalid", %{conn: conn} do
+ course_id = conn.assigns.course_id
+ course = Repo.get!(Course, course_id)
+ assessment = insert(:assessment, course: course)
+
+ resp =
+ conn
+ |> post(feedback_url(course_id, assessment.id), %{"rating" => 6})
+ |> json_response(400)
+
+ assert resp == %{"error" => "Failed to submit feedback"}
+ end
+ end
+
+ defp seed_usage_logs(conn) do
+ course = Repo.get!(Course, conn.assigns.course_id)
+ assessment = insert(:assessment, course: course)
+ question_1 = insert(:question, assessment: assessment, display_order: 1)
+ question_2 = insert(:question, assessment: assessment, display_order: 2)
+
+ student_1 = insert(:course_registration, course: course, role: :student)
+ student_2 = insert(:course_registration, course: course, role: :student)
+
+ submission_1 = insert(:submission, assessment: assessment, student: student_1)
+ submission_2 = insert(:submission, assessment: assessment, student: student_2)
+
+ answer_11 = insert(:answer, submission: submission_1, question: question_1)
+ answer_12 = insert(:answer, submission: submission_1, question: question_2)
+ answer_21 = insert(:answer, submission: submission_2, question: question_1)
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ answer_id: answer_11.id,
+ submission_id: submission_1.id,
+ user_id: student_1.user_id
+ })
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ answer_id: answer_11.id,
+ submission_id: submission_1.id,
+ user_id: student_1.user_id
+ })
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ answer_id: answer_21.id,
+ submission_id: submission_2.id,
+ user_id: student_2.user_id
+ })
+
+ assert {:ok, _} =
+ LLMStats.log_usage(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_2.id,
+ answer_id: answer_12.id,
+ submission_id: submission_1.id,
+ user_id: student_1.user_id
+ })
+
+ %{
+ assessment: assessment,
+ question_1: question_1,
+ question_2: question_2
+ }
+ end
+
+ defp seed_feedback(conn) do
+ course = Repo.get!(Course, conn.assigns.course_id)
+ assessment = insert(:assessment, course: course)
+ question_1 = insert(:question, assessment: assessment, display_order: 1)
+ question_2 = insert(:question, assessment: assessment, display_order: 2)
+ user_1 = insert(:user, name: "Alice")
+ user_2 = insert(:user, name: "Bob")
+
+ assert {:ok, _} =
+ LLMStats.submit_feedback(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_1.id,
+ user_id: user_1.id,
+ rating: 5,
+ body: "Very helpful"
+ })
+
+ assert {:ok, _} =
+ LLMStats.submit_feedback(%{
+ course_id: course.id,
+ assessment_id: assessment.id,
+ question_id: question_2.id,
+ user_id: user_2.id,
+ rating: 3,
+ body: "Could be clearer"
+ })
+
+ %{
+ assessment: assessment,
+ question_1: question_1,
+ question_2: question_2
+ }
+ end
+
+ defp assessment_stats_url(course_id, assessment_id) do
+ "/v2/courses/#{course_id}/admin/llm-stats/#{assessment_id}"
+ end
+
+ defp question_stats_url(course_id, assessment_id, question_id) do
+ "/v2/courses/#{course_id}/admin/llm-stats/#{assessment_id}/#{question_id}"
+ end
+
+ defp feedback_url(course_id, assessment_id) do
+ "/v2/courses/#{course_id}/admin/llm-stats/#{assessment_id}/feedback"
+ end
+end
diff --git a/test/cadet_web/controllers/ai_code_analysis_controller_test.exs b/test/cadet_web/controllers/ai_code_analysis_controller_test.exs
index c6e1ca3a4..654c4ba69 100644
--- a/test/cadet_web/controllers/ai_code_analysis_controller_test.exs
+++ b/test/cadet_web/controllers/ai_code_analysis_controller_test.exs
@@ -2,8 +2,8 @@ import Mock
defmodule CadetWeb.AICodeAnalysisControllerTest do
use CadetWeb.ConnCase
- alias Cadet.{Repo, AIComments}
- alias Cadet.{AIComments.AIComment, Courses.Course}
+ alias Cadet.Repo
+ alias Cadet.AIComments.AIComment
alias CadetWeb.AICommentsHelpers
setup do
@@ -41,9 +41,9 @@ defmodule CadetWeb.AICodeAnalysisControllerTest do
admin_user: admin_user,
staff_user: staff_user,
course_with_llm: course_with_llm,
- example_assessment: example_assessment,
- new_submission: new_submission,
- question: question,
+ example_assessment: _example_assessment,
+ new_submission: _new_submission,
+ question: _question,
answer: answer
} do
# Make the API call
@@ -83,12 +83,12 @@ defmodule CadetWeb.AICodeAnalysisControllerTest do
test "errors out when given an invalid answer id", %{
conn: conn,
admin_user: admin_user,
- staff_user: staff_user,
+ staff_user: _staff_user,
course_with_llm: course_with_llm,
- example_assessment: example_assessment,
- new_submission: new_submission,
- question: question,
- answer: answer
+ example_assessment: _example_assessment,
+ new_submission: _new_submission,
+ question: _question,
+ answer: _answer
} do
random_answer_id = 324_324
@@ -97,22 +97,58 @@ defmodule CadetWeb.AICodeAnalysisControllerTest do
chat_completion: fn _input, _overrides ->
{:ok, %{:choices => [%{"message" => %{"content" => "Comment1|||Comment2"}}]}}
end do
- response =
- conn
- |> sign_in(admin_user.user)
- |> post(build_url_generate_ai_comments(course_with_llm.id, random_answer_id))
- |> text_response(400)
+ conn
+ |> sign_in(admin_user.user)
+ |> post(build_url_generate_ai_comments(course_with_llm.id, random_answer_id))
+ |> text_response(400)
end
end
+ test "errors out when given an invalid course id", %{
+ conn: conn,
+ admin_user: admin_user,
+ answer: answer
+ } do
+ conn =
+ conn
+ |> sign_in(admin_user.user)
+ |> post(build_url_generate_ai_comments("invalid-course-id", answer.id))
+
+ assert response(conn, 403) == "Forbidden"
+ end
+
+ test "errors out when LLM API key is missing", %{conn: conn} do
+ course_without_key =
+ insert(:course, %{
+ enable_llm_grading: true,
+ llm_api_key: nil,
+ llm_model: "gpt-5-mini",
+ llm_api_url: "http://testapi.com",
+ llm_course_level_prompt: "Example Prompt"
+ })
+
+ assessment = insert(:assessment, %{course: course_without_key})
+ submission = insert(:submission, %{assessment: assessment})
+ question = insert(:programming_question, %{assessment: assessment})
+ answer = insert(:answer, %{submission: submission, question: question})
+ admin_user = insert(:course_registration, %{role: :admin, course: course_without_key})
+
+ conn =
+ conn
+ |> sign_in(admin_user.user)
+ |> post(build_url_generate_ai_comments(course_without_key.id, answer.id))
+
+ assert response(conn, 500) == "Failed to decrypt LLM API key"
+ end
+
test "LLM endpoint returns an invalid response - should log errors in database", %{
conn: conn,
admin_user: admin_user,
- staff_user: staff_user,
+ staff_user: _staff_user,
course_with_llm: course_with_llm,
- example_assessment: example_assessment,
- new_submission: new_submission,
- question: question,
+ example_assessment: _example_assessment,
+ new_submission: _new_submission,
+ question: _question,
answer: answer
} do
# Make the API call that should fail
@@ -120,11 +156,10 @@ defmodule CadetWeb.AICodeAnalysisControllerTest do
chat_completion: fn _input, _overrides ->
{:ok, %{"body" => "Some unexpected response"}}
end do
- response =
- conn
- |> sign_in(admin_user.user)
- |> post(build_url_generate_ai_comments(course_with_llm.id, answer.id))
- |> text_response(502)
+ conn
+ |> sign_in(admin_user.user)
+ |> post(build_url_generate_ai_comments(course_with_llm.id, answer.id))
+ |> text_response(502)
end
# Verify database entry even with error
diff --git a/test/cadet_web/controllers/assessments_controller_test.exs b/test/cadet_web/controllers/assessments_controller_test.exs
index 17e7cca72..cb8ddfd1a 100644
--- a/test/cadet_web/controllers/assessments_controller_test.exs
+++ b/test/cadet_web/controllers/assessments_controller_test.exs
@@ -84,7 +84,8 @@ defmodule CadetWeb.AssessmentsControllerTest do
"hasVotingFeatures" => &1.has_voting_features,
"hasTokenCounter" => &1.has_token_counter,
"isVotingPublished" => false,
- "hoursBeforeEarlyXpDecay" => &1.config.hours_before_early_xp_decay
+ "hoursBeforeEarlyXpDecay" => &1.config.hours_before_early_xp_decay,
+ "isLlmGraded" => &1.has_llm_questions || &1.llm_assessment_prompt not in [nil, ""]
}
)
@@ -175,7 +176,8 @@ defmodule CadetWeb.AssessmentsControllerTest do
"hasVotingFeatures" => &1.has_voting_features,
"hasTokenCounter" => &1.has_token_counter,
"isVotingPublished" => false,
- "hoursBeforeEarlyXpDecay" => &1.config.hours_before_early_xp_decay
+ "hoursBeforeEarlyXpDecay" => &1.config.hours_before_early_xp_decay,
+ "isLlmGraded" => &1.has_llm_questions || &1.llm_assessment_prompt not in [nil, ""]
}
)
@@ -297,7 +299,8 @@ defmodule CadetWeb.AssessmentsControllerTest do
false
else
&1.is_published
- end
+ end,
+ "isLlmGraded" => &1.has_llm_questions || &1.llm_assessment_prompt not in [nil, ""]
}
)
@@ -1969,7 +1972,7 @@ defmodule CadetWeb.AssessmentsControllerTest do
defp build_url_unlock(course_id, assessment_id),
do: "/v2/courses/#{course_id}/assessments/#{assessment_id}/unlock"
- defp build_popular_leaderboard_url(course_id, assessment_id, params \\ %{}) do
+ defp build_popular_leaderboard_url(course_id, assessment_id, params) do
base_url = "#{build_url(course_id, assessment_id)}/contest_popular_leaderboard"
if params != %{} do
@@ -1980,7 +1983,7 @@ defmodule CadetWeb.AssessmentsControllerTest do
end
end
- defp build_score_leaderboard_url(course_id, assessment_id, params \\ %{}) do
+ defp build_score_leaderboard_url(course_id, assessment_id, params) do
base_url = "#{build_url(course_id, assessment_id)}/contest_score_leaderboard"
if params != %{} do
diff --git a/test/cadet_web/controllers/courses_controller_test.exs b/test/cadet_web/controllers/courses_controller_test.exs
index 876c0166e..cb7d807b7 100644
--- a/test/cadet_web/controllers/courses_controller_test.exs
+++ b/test/cadet_web/controllers/courses_controller_test.exs
@@ -169,6 +169,7 @@ defmodule CadetWeb.CoursesControllerTest do
"enableAchievements" => true,
"enableSourcecast" => true,
"enableStories" => false,
+ "hasLlmContent" => false,
"sourceChapter" => 1,
"sourceVariant" => "default",
"moduleHelpText" => "Help Text",
@@ -177,6 +178,44 @@ defmodule CadetWeb.CoursesControllerTest do
} = resp
end
+ @tag authenticate: :student
+ test "returns hasLlmContent true when assessment has non-empty llm_assessment_prompt", %{
+ conn: conn
+ } do
+ course_id = conn.assigns[:course_id]
+ course = Repo.get(Course, course_id)
+
+ insert(:assessment, course: course, llm_assessment_prompt: "Use this grading rubric")
+
+ resp = conn |> get(build_url_config(course_id)) |> json_response(200)
+
+ assert %{
+ "config" => %{
+ "hasLlmContent" => true
+ }
+ } = resp
+ end
+
+ @tag authenticate: :student
+ test "returns hasLlmContent true when any question has non-empty llm_prompt", %{conn: conn} do
+ course_id = conn.assigns[:course_id]
+ course = Repo.get(Course, course_id)
+ assessment = insert(:assessment, course: course, llm_assessment_prompt: nil)
+
+ insert(:question,
+ assessment: assessment,
+ question: build(:programming_question_content, llm_prompt: "Provide AI feedback")
+ )
+
+ resp = conn |> get(build_url_config(course_id)) |> json_response(200)
+
+ assert %{
+ "config" => %{
+ "hasLlmContent" => true
+ }
+ } = resp
+ end
+
@tag authenticate: :student
test "returns with error for user not belonging to the specified course", %{conn: conn} do
course_id = conn.assigns[:course_id]
diff --git a/test/cadet_web/controllers/user_controller_test.exs b/test/cadet_web/controllers/user_controller_test.exs
index 4cf471297..7084d08b9 100644
--- a/test/cadet_web/controllers/user_controller_test.exs
+++ b/test/cadet_web/controllers/user_controller_test.exs
@@ -108,6 +108,8 @@ defmodule CadetWeb.UserControllerTest do
"enableGame" => true,
"enableSourcecast" => true,
"enableStories" => false,
+ "enableLlmGrading" => false,
+ "hasLlmContent" => false,
"courseShortName" => "CS1101S",
"moduleHelpText" => "Help Text",
"courseName" => "Programming Methodology",
@@ -325,8 +327,10 @@ defmodule CadetWeb.UserControllerTest do
"enableAchievements" => true,
"enableGame" => true,
"enableSourcecast" => true,
+ "enableLlmGrading" => false,
"courseShortName" => "CS1101S",
"enableStories" => false,
+ "hasLlmContent" => false,
"moduleHelpText" => "Help Text",
"courseName" => "Programming Methodology",
"sourceChapter" => 1,
@@ -344,6 +348,26 @@ defmodule CadetWeb.UserControllerTest do
assert expected == resp
end
+ @tag authenticate: :student
+ test "includes hasLlmContent when latest viewed course contains llm-tagged assessment", %{
+ conn: conn
+ } do
+ course = conn.assigns.current_user.latest_viewed_course
+
+ insert(:assessment, %{
+ is_published: true,
+ course: course,
+ llm_assessment_prompt: "Use this rubric"
+ })
+
+ resp =
+ conn
+ |> get("/v2/user/latest_viewed_course")
+ |> json_response(200)
+
+ assert resp["courseConfiguration"]["hasLlmContent"] == true
+ end
+
@tag sign_in: %{latest_viewed_course: nil}
test "success, no latest_viewed_course", %{conn: conn} do
resp =
diff --git a/test/cadet_web/plug/rate_limiter_test.exs b/test/cadet_web/plug/rate_limiter_test.exs
index d5337f711..305d94d9a 100644
--- a/test/cadet_web/plug/rate_limiter_test.exs
+++ b/test/cadet_web/plug/rate_limiter_test.exs
@@ -1,6 +1,5 @@
defmodule CadetWeb.Plugs.RateLimiterTest do
use CadetWeb.ConnCase
- import Plug.Conn
alias CadetWeb.Plugs.RateLimiter
setup %{conn: conn} do
diff --git a/test/factories/assessments/assessment_factory.ex b/test/factories/assessments/assessment_factory.ex
index 5dba2955d..19c893580 100644
--- a/test/factories/assessments/assessment_factory.ex
+++ b/test/factories/assessments/assessment_factory.ex
@@ -40,7 +40,13 @@ defmodule Cadet.Assessments.AssessmentFactory do
close_at: Timex.shift(Timex.now(), days: Enum.random(1..30)),
is_published: false,
max_team_size: 1,
- llm_assessment_prompt: nil
+ llm_assessment_prompt: nil,
+ llm_input_cost: Decimal.new("3.20"),
+ llm_output_cost: Decimal.new("12.80"),
+ llm_total_input_tokens: 0,
+ llm_total_output_tokens: 0,
+ llm_total_cached_tokens: 0,
+ llm_total_cost: Decimal.new("0.0")
}
end
end
diff --git a/test/support/data_case.ex b/test/support/data_case.ex
index 5b0a5c76f..74f0e116f 100644
--- a/test/support/data_case.ex
+++ b/test/support/data_case.ex
@@ -51,7 +51,7 @@ defmodule Cadet.DataCase do
end
@doc """
- A helper that builds a Plug.Upload struct to test Arc.Ecto fields
+ A helper that builds a Plug.Upload struct to test Waffle.Ecto fields
"""
def build_upload(path, content_type \\ "image\png") do
%Plug.Upload{path: path, filename: Path.basename(path), content_type: content_type}
diff --git a/test/test_helper.exs b/test/test_helper.exs
index 30e668048..c6766c617 100644
--- a/test/test_helper.exs
+++ b/test/test_helper.exs
@@ -5,4 +5,27 @@ System.put_env("LEADER", "1")
ExUnit.start()
Faker.start()
+# Recreate the test database (drop if possible, then create) and run migrations
+_ = Ecto.Adapters.Postgres.ensure_all_started(Cadet.Repo.config(), :temporary)
+
+case Cadet.Repo.start_link() do
+ {:ok, _pid} -> :ok
+ {:error, {:already_started, _pid}} -> :ok
+end
+
+case Ecto.Adapters.Postgres.storage_down(Cadet.Repo.config()) do
+ :ok -> :ok
+ {:error, :already_down} -> :ok
+ {:error, _} -> :ok
+end
+
+case Ecto.Adapters.Postgres.storage_up(Cadet.Repo.config()) do
+ :ok -> :ok
+ {:error, :already_up} -> :ok
+ {:error, _} -> :ok
+end
+
+# Run all pending migrations
+Ecto.Migrator.run(Cadet.Repo, :up, all: true)
+
Ecto.Adapters.SQL.Sandbox.mode(Cadet.Repo, :manual)