-
Notifications
You must be signed in to change notification settings - Fork 6
gpu support continue #257
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
gpu support continue #257
Changes from all commits
4c415a5
527e634
c08172b
dc98746
479a954
1a8eef5
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -1,9 +1,12 @@ | ||
| export prepare_data | ||
|
|
||
| function prepare_data(hm, data::KeyedArray; kwargs...) | ||
| predictors_forcing, targets = get_prediction_target_names(hm) | ||
| function prepare_data(hm, data::KeyedArray; cfg=DataConfig(), kwargs...) | ||
| predictors, forcings, targets = get_prediction_target_names(hm) | ||
| # KeyedArray: use () syntax for views that are differentiable | ||
| return (data(predictors_forcing), data(targets)) | ||
| dev = cfg.gdev | ||
| targets_nt = NamedTuple([target => dev(Array(data(target))) for target in targets]) | ||
| forcings_nt = NamedTuple([forcing => dev(Array(data(forcing))) for forcing in forcings]) | ||
| return ((dev(Array(data(predictors))), forcings_nt), targets_nt) | ||
| end | ||
|
|
||
| function prepare_data(hm, data::AbstractDimArray; kwargs...) | ||
|
|
@@ -13,10 +16,10 @@ function prepare_data(hm, data::AbstractDimArray; kwargs...) | |
| end | ||
|
|
||
| function prepare_data(hm, data::DataFrame; array_type = :KeyedArray, drop_missing_rows = true) | ||
| predictors_forcing, targets = get_prediction_target_names(hm) | ||
| predictors, forcings, targets = get_prediction_target_names(hm) | ||
|
|
||
| all_predictor_cols = unique(vcat(values(predictors_forcing)...)) | ||
| col_to_select = unique([all_predictor_cols; targets]) | ||
| # all_predictor_cols = unique(vcat(values(predictors_forcing)...)) | ||
| col_to_select = unique([predictors; forcings; targets]) | ||
|
|
||
| # subset to only the cols we care about | ||
| sdf = data[!, col_to_select] | ||
|
|
@@ -84,33 +87,36 @@ Returns a tuple of (predictors_forcing, targets) names. | |
| function get_prediction_target_names(hm) | ||
| targets = hm.targets | ||
| predictors_forcing = Symbol[] | ||
| predictors = Symbol[] | ||
| forcings = Symbol[] | ||
| for prop in propertynames(hm) | ||
| if occursin("predictors", string(prop)) | ||
| val = getproperty(hm, prop) | ||
| if isa(val, AbstractVector) | ||
| append!(predictors_forcing, val) | ||
| append!(predictors, val) | ||
| elseif isa(val, Union{NamedTuple, Tuple}) | ||
| append!(predictors_forcing, unique(vcat(values(val)...))) | ||
| append!(predictors, unique(vcat(values(val)...))) | ||
| end | ||
| end | ||
| end | ||
| for prop in propertynames(hm) | ||
| if occursin("forcing", string(prop)) | ||
| val = getproperty(hm, prop) | ||
| if isa(val, AbstractVector) | ||
| append!(predictors_forcing, val) | ||
| append!(forcings, val) | ||
| elseif isa(val, Union{Tuple, NamedTuple}) | ||
| append!(predictors_forcing, unique(vcat(values(val)...))) | ||
| append!(forcings, unique(vcat(values(val)...))) | ||
| end | ||
| end | ||
| end | ||
| predictors_forcing = unique(predictors_forcing) | ||
| # predicto | ||
| # predictors_forcing = unique(predictors_forcing) | ||
|
Comment on lines +112 to +113
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
|
|
||
| if isempty(predictors_forcing) | ||
| @warn "Note that you don't have predictors or forcing variables." | ||
| end | ||
|
Comment on lines 115 to 117
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
| if isempty(targets) | ||
| @warn "Note that you don't have target names." | ||
| end | ||
| return predictors_forcing, targets | ||
| return predictors, forcings, targets | ||
| end | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -18,14 +18,14 @@ Main loss function for hybrid models that handles both training and evaluation m | |
| - `(loss_values, st, ŷ)`: NamedTuple of losses, state and predictions | ||
| """ | ||
| function compute_loss( | ||
| HM::LuxCore.AbstractLuxContainerLayer, ps, st, (x, (y_t, y_nan)); | ||
| HM::LuxCore.AbstractLuxContainerLayer, ps, st, ((x, forcings), (y_t, y_nan)); | ||
| logging::LoggingLoss | ||
| ) | ||
|
|
||
| targets = HM.targets | ||
| ext_loss = extra_loss(logging) | ||
| if logging.train_mode | ||
| ŷ, st = HM(x, ps, st) | ||
| ŷ, st = HM((x, forcings), ps, st) | ||
| loss_value = _compute_loss(ŷ, y_t, y_nan, targets, training_loss(logging), logging.agg) | ||
| # Add extra_loss if provided | ||
| if ext_loss !== nothing | ||
|
|
@@ -34,7 +34,7 @@ function compute_loss( | |
| end | ||
| stats = NamedTuple() | ||
| else | ||
| ŷ, _ = HM(x, ps, LuxCore.testmode(st)) | ||
| ŷ, _ = HM((x, forcings), ps, LuxCore.testmode(st)) | ||
| loss_value = _compute_loss(ŷ, y_t, y_nan, targets, loss_types(logging), logging.agg) | ||
| # Add extra_loss entries if provided | ||
| if ext_loss !== nothing | ||
|
|
@@ -105,9 +105,10 @@ _get_target_ŷ(ŷ, y_t, target) = | |
| function assemble_loss(ŷ, y, y_nan, targets, loss_spec) | ||
| return [ | ||
| begin | ||
| y_t = _get_target_y(y, target) | ||
| ŷ_t = _get_target_ŷ(ŷ, y_t, target) | ||
| _apply_loss(ŷ_t, y_t, _get_target_nan(y_nan, target), loss_spec) | ||
| y_t = y[target]# _get_target_y(y, target) | ||
| ŷ_t = ŷ[target]#_get_target_ŷ(ŷ, y_t, target) | ||
| _apply_loss(ŷ_t, y_t, y_nan, loss_spec) | ||
| # _apply_loss(ŷ_t, y_t, _get_target_nan(y_nan, target), loss_spec) | ||
|
Comment on lines +108 to +111
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more.

There is a typo on line 109; the suggested correction is:

```julia
y_t = y[target]
ŷ_t = ŷ[target]
_apply_loss(ŷ_t, y_t, y_nan[target], loss_spec)
``` |
||
| end | ||
| for target in targets | ||
| ] | ||
|
|
||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
|
|
@@ -69,8 +69,7 @@ function loss_fn(ŷ, y, y_nan, ::Val{:pearson}) | |
| return cor(ŷ[y_nan], y[y_nan]) | ||
| end | ||
| function loss_fn(ŷ, y, y_nan, ::Val{:r2}) | ||
| r = cor(ŷ[y_nan], y[y_nan]) | ||
| return r * r | ||
| return 1 - sum((y[y_nan] .- ŷ[y_nan]).^2) / sum((y[y_nan] .- mean(ŷ[y_nan])).^2) | ||
|
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. |
||
| end | ||
|
|
||
| function loss_fn(ŷ, y, y_nan, ::Val{:pearsonLoss}) | ||
|
|
||
There was a problem hiding this comment.
Choose a reason for hiding this comment.
The reason will be displayed to describe this comment to others. Learn more.
The fields `gdev` and `cdev` should have explicit type annotations (e.g., `Lux.AbstractLuxDevice`) to improve code clarity and potentially help with compiler optimizations. Additionally, ensure that `gpu_device()` is the intended default for all instances of `DataConfig`, as it may trigger device initialization.