From 90626d5ab247c3fd642fbd3bba6c5af6ef7d5068 Mon Sep 17 00:00:00 2001 From: Arlo White Date: Mon, 12 Aug 2024 10:28:56 +1000 Subject: [PATCH] re-format entire project --- Aviz/build/make.jl | 11 +- Aviz/build/precompile_script.jl | 1 - ODE_preferences.jl | 25 +- build/make.jl | 9 +- build/precompile_script.jl | 2 - docs/make.jl | 7 +- ext/AvizExt/AvizExt.jl | 129 ++-- ext/AvizExt/analysis.jl | 15 +- ext/AvizExt/layout.jl | 35 +- ext/AvizExt/plotting.jl | 28 +- ext/AvizExt/theme.jl | 8 +- ext/AvizExt/viz/clustering.jl | 386 +++++----- ext/AvizExt/viz/environment/cyclones.jl | 13 +- ext/AvizExt/viz/environment/dhw.jl | 10 +- ext/AvizExt/viz/location_selection.jl | 10 +- ext/AvizExt/viz/rule_extraction.jl | 34 +- ext/AvizExt/viz/scenarios.jl | 26 +- ext/AvizExt/viz/sensitivity.jl | 48 +- ext/AvizExt/viz/spatial.jl | 14 +- ext/AvizExt/viz/taxa_dynamics.jl | 49 +- ext/AvizExt/viz/viz.jl | 1 - src/ADRIA.jl | 130 +++- src/Domain.jl | 13 +- src/ExtInterface/ADRIA/Domain.jl | 17 +- src/ExtInterface/ReefMod/RMEDomain.jl | 41 +- src/ExtInterface/ReefMod/ReefModDomain.jl | 31 +- src/analysis/analysis.jl | 14 +- src/analysis/clustering.jl | 722 ++++++++++--------- src/analysis/intervention.jl | 12 +- src/analysis/pareto.jl | 16 +- src/analysis/rule_extraction.jl | 14 +- src/analysis/sensitivity.jl | 78 +- src/decision/Criteria/DecisionPreferences.jl | 2 +- src/decision/Criteria/DecisionWeights.jl | 5 +- src/decision/Criteria/FogCriteria.jl | 27 +- src/decision/Criteria/SRMCriteria.jl | 28 +- src/decision/Criteria/SeedCriteria.jl | 33 +- src/decision/dMCDA.jl | 9 +- src/decision/location_selection.jl | 84 ++- src/decision/mcda_methods.jl | 15 +- src/ecosystem/Ecosystem.jl | 13 +- src/ecosystem/connectivity.jl | 14 +- src/ecosystem/corals/CoralGrowth.jl | 19 +- src/ecosystem/corals/Corals.jl | 72 +- src/ecosystem/corals/growth.jl | 105 +-- src/ecosystem/corals/growth_expanded.jl | 10 +- src/factors/Factors.jl | 15 +- src/interventions/Interventions.jl | 34 +- src/interventions/seeding.jl | 30 +- src/io/ResultSet.jl | 44 +- src/io/initial_coral_cover.jl | 15 +- src/io/inputs.jl | 6 +- src/io/result_io.jl | 64 +- src/io/rme_result_io.jl | 47 +- src/io/sampling.jl | 84 ++- src/main_app.jl | 35 +- src/metrics/metrics.jl | 128 ++-- src/metrics/pareto.jl | 2 - src/metrics/performance.jl | 33 +- src/metrics/ranks.jl | 13 +- src/metrics/reef_indices.jl | 49 +- src/metrics/scenario.jl | 29 +- src/metrics/site_level.jl | 2 +- src/metrics/temporal.jl | 64 +- src/metrics/utils.jl | 7 +- src/scenario.jl | 131 ++-- src/spatial/spatial.jl | 1 - src/utils/scale.jl | 4 +- src/utils/setup.jl | 4 +- test/Ecosystem.jl | 30 +- test/clustering.jl | 200 ++--- test/connectivity.jl | 12 +- test/data_loading.jl | 27 +- test/growth.jl | 28 +- test/io/inputs.jl | 14 +- test/mcda.jl | 1 - test/metrics.jl | 29 +- test/runtests.jl | 27 +- test/sampling.jl | 91 ++- test/seeding.jl | 21 +- test/site_selection.jl | 21 +- test/spatial_clustering.jl | 8 +- test/spec.jl | 52 +- test/utils/scale.jl | 6 +- 84 files changed, 2146 insertions(+), 1647 deletions(-) diff --git a/Aviz/build/make.jl b/Aviz/build/make.jl index 69cd18282..2c3ede5a1 100644 --- a/Aviz/build/make.jl +++ b/Aviz/build/make.jl @@ -2,7 +2,6 @@ using Pkg, PackageCompiler - here = @__DIR__ cd(here) @@ -11,7 +10,9 @@ if "app" in ARGS cd("../..") - create_app("Aviz", "adria_aviz"; include_lazy_artifacts=true, force=true, incremental=true) + create_app( + "Aviz", "adria_aviz"; include_lazy_artifacts=true, force=true, incremental=true + ) exit() end @@ -29,7 +30,11 @@ if 
"sysimage" in ARGS project_deps = collect(keys(Pkg.project().dependencies)) try - create_sysimage(project_deps; sysimage_path=sysimage_fn, precompile_execution_file="precompile_script.jl") + create_sysimage( + project_deps; + sysimage_path=sysimage_fn, + precompile_execution_file="precompile_script.jl" + ) catch @info "Sysimage build failed..." end diff --git a/Aviz/build/precompile_script.jl b/Aviz/build/precompile_script.jl index a19a92fd1..4374add8e 100644 --- a/Aviz/build/precompile_script.jl +++ b/Aviz/build/precompile_script.jl @@ -3,7 +3,6 @@ using Statistics, Distributions using Makie.GeometryBasics using ADRIA, Makie - precompile(CSV.read, (String, DataFrame)) precompile(GeoDataFrames.read, (String,)) diff --git a/ODE_preferences.jl b/ODE_preferences.jl index d542c47ab..468c56d38 100644 --- a/ODE_preferences.jl +++ b/ODE_preferences.jl @@ -3,9 +3,22 @@ using OrdinaryDiffEq set_preferences!(UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileNonStiff" => true) set_preferences!(UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileStiff" => false) -set_preferences!(UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileAutoSwitch" => false) -set_preferences!(UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileLowStorage" => false) -set_preferences!(UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileDefaultSpecialize" => true) -set_preferences!(UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileAutoSpecialize" => false) -set_preferences!(UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileFunctionWrapperSpecialize" => false) -set_preferences!(UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileNoSpecialize" => false) +set_preferences!( + UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileAutoSwitch" => false +) +set_preferences!( + UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileLowStorage" => false +) +set_preferences!( + UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileDefaultSpecialize" => true +) +set_preferences!( + UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileAutoSpecialize" => false +) +set_preferences!( + UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), + "PrecompileFunctionWrapperSpecialize" => false +) +set_preferences!( + UUID("1dea7af3-3e70-54e6-95c3-0bf5283fa5ed"), "PrecompileNoSpecialize" => false +) diff --git a/build/make.jl b/build/make.jl index 6ac5bfb51..791703b93 100644 --- a/build/make.jl +++ b/build/make.jl @@ -1,6 +1,5 @@ using Pkg, PackageCompiler - # Removed ADRIA from sysimage project so line below is not needed (but could be useful later). # project_deps = filter!(i -> i != "ADRIA", project_deps) # Remove ADRIA (we don't want to make this static for dev purposes) sysimage_fn = "ADRIA_sysimage.dll" @@ -13,7 +12,11 @@ if "dev" in ARGS end project_deps = collect(keys(Pkg.project().dependencies)) -create_sysimage(project_deps; sysimage_path=sysimage_fn, precompile_execution_file="precompile_script.jl") +create_sysimage( + project_deps; + sysimage_path=sysimage_fn, + precompile_execution_file="precompile_script.jl" +) if "dev" in ARGS @info "Removing dev packages from project spec" @@ -25,4 +28,4 @@ end # A julia session with the sysimage can be started with: # julia --project=. 
-J ADRIA_sysimage.dll -exit() \ No newline at end of file +exit() diff --git a/build/precompile_script.jl b/build/precompile_script.jl index 20c773760..561a6acd1 100644 --- a/build/precompile_script.jl +++ b/build/precompile_script.jl @@ -1,10 +1,8 @@ using CSV, DataFrames, GeoDataFrames - precompile(CSV.read, (String, DataFrame)) precompile(GeoDataFrames.read, (String,)) - # precompile(load_domain, (String, Int64)) # precompile(Domain, (String, Int64, String, String, String, String, String, String, String)) # precompile(EnvLayer, (String, String, String, String, String, String, String)) diff --git a/docs/make.jl b/docs/make.jl index f56d83fee..57e9e5378 100644 --- a/docs/make.jl +++ b/docs/make.jl @@ -3,10 +3,9 @@ push!(LOAD_PATH, "../src/") using Documenter, ADRIA - -makedocs( +makedocs(; sitename="ADRIA.jl", - format=Documenter.HTML( + format=Documenter.HTML(; prettyurls=get(ENV, "CI", nothing) == "true", assets=["assets/favicon.ico"] ), @@ -43,7 +42,7 @@ makedocs( ] ) -deploydocs( +deploydocs(; repo="github.com/open-AIMS/ADRIA.jl.git", devbranch="main", target="build", diff --git a/ext/AvizExt/AvizExt.jl b/ext/AvizExt/AvizExt.jl index 566edbebc..7a561cc19 100644 --- a/ext/AvizExt/AvizExt.jl +++ b/ext/AvizExt/AvizExt.jl @@ -22,21 +22,18 @@ using ADRIA: import ADRIA: timesteps as AD_timesteps import ADRIA.viz: explore - Random.seed!(101) const ASSETS = @path joinpath(pkgdir(ADRIA), "assets") const LOGO = @path joinpath(ASSETS, "imgs", "ADRIA_logo.png") const LOADER = @path joinpath(ASSETS, "imgs", "ADRIA_loader.gif") - include("./plotting.jl") include("./layout.jl") include("./theme.jl") include("./analysis.jl") include("./viz/viz.jl") - """Main entry point for app.""" function julia_main()::Cint if "explore" in ARGS @@ -68,7 +65,7 @@ function main_menu() logo = image( f[1, 1], - rotr90(load(convert(String, LOGO))), + rotr90(load(convert(String, LOGO))); axis=(aspect=DataAspect(),) ) @@ -76,11 +73,11 @@ function main_menu() hidespines!(f.content[1]) Label(f[2, 1], "Enter ADRIA Result Set to analyze") - rs_path_tb = Textbox(f[3, 1], placeholder="./Moore_RS") + rs_path_tb = Textbox(f[3, 1]; placeholder="./Moore_RS") rs_path_tb.stored_string[] = "./Moore_RS" status_label = Label(f[4, 1], "") - launch_button = Button(f[5, 1], label="Explore") + launch_button = Button(f[5, 1]; label="Explore") on(launch_button.clicks) do c rs_path = rs_path_tb.stored_string[] @@ -104,13 +101,14 @@ function main_menu() end gl_screen = display(f) - wait(gl_screen) + return wait(gl_screen) end - function _get_seeded_sites(seed_log, ts, scens; N=10) - t = dropdims(sum(seed_log[timesteps=ts, scenarios=scens], dims=:timesteps), dims=:timesteps) - site_scores = dropdims(sum(t, dims=:scenarios), dims=:scenarios) + t = dropdims( + sum(seed_log[timesteps=ts, scenarios=scens]; dims=:timesteps); dims=:timesteps + ) + site_scores = dropdims(sum(t; dims=:scenarios); dims=:scenarios) # @info "Scores", site_scores if length(unique(site_scores)) == 1 @@ -133,10 +131,9 @@ function display_loader(fig, anim) end function remove_loader(fig, task) Base.throwto(task, InterruptException()) - empty!(fig) + return empty!(fig) end - """ ADRIA.viz.explore(rs::String) ADRIA.viz.explore(rs::ResultSet) @@ -145,7 +142,7 @@ Display GUI for quick visualization and analysis of results. 
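+
+# Example
+
+A minimal usage sketch (the result-set path is illustrative):
+
+```julia
+rs = ADRIA.load_results("./Moore_RS")
+ADRIA.viz.explore(rs)
+```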
""" function ADRIA.viz.explore(rs::ResultSet) @info "Creating display" - layout = comms_layout(size=(1920, 1080)) + layout = comms_layout(; size=(1920, 1080)) f = layout.figure traj_display = layout.trajectory.temporal @@ -159,20 +156,22 @@ function ADRIA.viz.explore(rs::ResultSet) tac_min_max = (minimum(tac_scens), maximum(tac_scens)) mean_rc_sites = metrics.relative_cover(rs) - obs_rc = vec(mean(mean_rc_sites, dims=(:scenarios, :timesteps))) + obs_rc = vec(mean(mean_rc_sites; dims=(:scenarios, :timesteps))) obs_mean_rc_sites = Observable(obs_rc) asv_scens = metrics.scenario_asv(rs) - asv_scen_dist = dropdims(mean(asv_scens, dims=:timesteps), dims=:timesteps) + asv_scen_dist = dropdims(mean(asv_scens; dims=:timesteps); dims=:timesteps) juves_scens = metrics.scenario_relative_juveniles(rs) - juves_scen_dist = dropdims(mean(juves_scens, dims=:timesteps), dims=:timesteps) + juves_scen_dist = dropdims(mean(juves_scens; dims=:timesteps); dims=:timesteps) # Generate trajectory controls @info "Creating controls" num_steps = Int(ceil((tac_min_max[2] - tac_min_max[1]) + 1)) - tac_slider = IntervalSlider(traj_outcome_sld[2, 1], - range=LinRange(floor(Int64, tac_min_max[1]) - 1, ceil(Int64, tac_min_max[2]) + 1, num_steps), + tac_slider = IntervalSlider(traj_outcome_sld[2, 1]; + range=LinRange( + floor(Int64, tac_min_max[1]) - 1, ceil(Int64, tac_min_max[2]) + 1, num_steps + ), startvalues=tac_min_max, horizontal=false ) @@ -187,7 +186,7 @@ function ADRIA.viz.explore(rs::ResultSet) years = AD_timesteps(rs) year_range = first(years), last(years) time_slider = IntervalSlider( - traj_time_sld[1, 2:3], + traj_time_sld[1, 2:3]; range=LinRange(year_range[1], year_range[2], (year_range[2] - year_range[1]) + 1), startvalues=year_range ) @@ -211,7 +210,7 @@ function ADRIA.viz.explore(rs::ResultSet) seed_log = rs.seed_log[:, 1, :, :] # Trajectories - series!(traj_display, years, tac_data, color=obs_color) + series!(traj_display, years, tac_data; color=obs_color) # Color transparency for density plots # Note: Density plots currently cannot handle empty datasets @@ -226,22 +225,22 @@ function ADRIA.viz.explore(rs::ResultSet) has_g = count(scen_groups[:guided]) > 0 # Density (TODO: Separate into own function) - tac_scen_dist = dropdims(mean(tac_scens, dims=:timesteps), dims=:timesteps) + tac_scen_dist = dropdims(mean(tac_scens; dims=:timesteps); dims=:timesteps) obs_cf_scen_dist = Observable(tac_scen_dist[scen_groups[:counterfactual]]) scen_hist = layout.scen_hist if has_cf - density!(scen_hist, obs_cf_scen_dist, direction=:y, color=cf_hist_alpha) + density!(scen_hist, obs_cf_scen_dist; direction=:y, color=cf_hist_alpha) end obs_ug_scen_dist = Observable(tac_scen_dist[scen_groups[:unguided]]) if has_ug - density!(scen_hist, obs_ug_scen_dist, direction=:y, color=ug_hist_alpha) + density!(scen_hist, obs_ug_scen_dist; direction=:y, color=ug_hist_alpha) end obs_g_scen_dist = Observable(tac_scen_dist[scen_groups[:guided]]) if has_g - density!(scen_hist, obs_g_scen_dist, direction=:y, color=g_hist_alpha) + density!(scen_hist, obs_g_scen_dist; direction=:y, color=g_hist_alpha) end hidedecorations!(scen_hist) @@ -249,7 +248,10 @@ function ADRIA.viz.explore(rs::ResultSet) ylims!(scen_hist, 0.0, maximum(tac_scen_dist)) ms = rs.model_spec - intervention_components = ms[(ms.component.=="Intervention").&(ms.fieldname.!="guided"), [:name, :fieldname, :lower_bound, :upper_bound]] + intervention_components = ms[ + (ms.component .== "Intervention") .& (ms.fieldname .!= "guided"), + [:name, :fieldname, :lower_bound, :upper_bound] + 
] interv_names = intervention_components.fieldname interv_idx = findall(x -> x in interv_names, names(X)) @@ -258,14 +260,20 @@ function ADRIA.viz.explore(rs::ResultSet) has_45 = count(X.RCP .== 45) > 0 has_60 = count(X.RCP .== 60) > 0 has_85 = count(X.RCP .== 85) > 0 - t_toggles = [Toggle(f, active=active) for active in [has_45, has_60, has_85, has_cf, has_ug, has_g]] + t_toggles = [ + Toggle(f; active=active) for + active in [has_45, has_60, has_85, has_cf, has_ug, has_g] + ] t_toggle_map = zip( t_toggles, ["RCP 4.5", "RCP 6.0", "RCP 8.5", "Counterfactual", "Unguided", "Guided"], [:black, :black, :black, :red, :green, :blue] ) - labels = [Label(f, "$l", color=lift(x -> x ? c : :gray, t.active)) for (t, l, c) in t_toggle_map] - layout.controls[1:2, 1:2] = grid!(hcat(t_toggles, labels), tellheight=false) + labels = [ + Label(f, "$l"; color=lift(x -> x ? c : :gray, t.active)) for + (t, l, c) in t_toggle_map + ] + layout.controls[1:2, 1:2] = grid!(hcat(t_toggles, labels); tellheight=false) # Controls for guided type guide_toggle_map = zip( @@ -286,7 +294,7 @@ function ADRIA.viz.explore(rs::ResultSet) l2 = Observable("$(round(x[2], digits=2))") push!(interv_sliders, IntervalSlider( - lc[i, 2], + lc[i, 2]; range=LinRange(x[1], x[2], 10), startvalues=(x[1], x[2]) ) @@ -315,7 +323,7 @@ function ADRIA.viz.explore(rs::ResultSet) # g_cv = std(tac_scen_dist[scen_groups[:guided]]) ./ mean(tac_scen_dist[scen_groups[:guided]]) ft_import = Axis( - layout.importance[1, 1], + layout.importance[1, 1]; xticks=([1, 2, 3], ["Mean TAC", "Mean ASV", "Mean Juveniles"]), yticks=(1:length(interv_names), intervention_components.name), title="Relative Importance" @@ -335,7 +343,7 @@ function ADRIA.viz.explore(rs::ResultSet) probas = Observable(outcome_probability(scen_dist)) barplot!( outcomes_ax, - @lift($(probas).values), + @lift($(probas).values); bar_labels=:y, direction=:y, flip_labels_at=@lift(maximum($(probas).values) * 0.9), @@ -353,13 +361,22 @@ function ADRIA.viz.explore(rs::ResultSet) curr_highlighted_sites = _get_seeded_sites(seed_log, (:), (:)) obs_site_sel = Observable(FC(; features=geodata[curr_highlighted_sites])) obs_site_highlight = Observable((:lightgreen, 1.0)) - poly!(map_disp[1, 1], obs_site_sel, color=(:white, 0.0), strokecolor=obs_site_highlight, strokewidth=0.75, overdraw=true) + poly!( + map_disp[1, 1], + obs_site_sel; + color=(:white, 0.0), + strokecolor=obs_site_highlight, + strokewidth=0.75, + overdraw=true + ) # Image file for loading animation # loader_anim = load(LOADER) @info "Setting up interactions" - function update_disp(time_val, tac_val, rcp45, rcp60, rcp85, c_tog, u_tog, g_tog, disp_vals...) + function update_disp( + time_val, tac_val, rcp45, rcp60, rcp85, c_tog, u_tog, g_tog, disp_vals... 
+ ) # Display loading animation # load_anim_display = @async display_loader(traj_display[2, 1], loader_anim) @@ -375,7 +392,11 @@ function ADRIA.viz.explore(rs::ResultSet) bnds[1][] = "$(round(disp_vals[intv][1], digits=2))" bnds[2][] = "$(round(disp_vals[intv][2], digits=2))" - show_idx .= show_idx .& ((X[:, interv_names[intv]] .>= disp_vals[intv][1]) .& (X[:, interv_names[intv]] .<= disp_vals[intv][2])) + show_idx .= + show_idx .& ( + (X[:, interv_names[intv]] .>= disp_vals[intv][1]) .& + (X[:, interv_names[intv]] .<= disp_vals[intv][2]) + ) end # Hide/display scenario types @@ -408,7 +429,12 @@ function ADRIA.viz.explore(rs::ResultSet) hide_idx .= Bool.(ones(Int64, length(hide_idx)) .⊻ show_idx) # Update map - obs_mean_rc_sites[] = vec(mean(mean_rc_sites(timesteps=timespan)[scenarios=show_idx], dims=(:scenarios, :timesteps))) + obs_mean_rc_sites[] = vec( + mean( + mean_rc_sites(; timesteps=timespan)[scenarios=show_idx]; + dims=(:scenarios, :timesteps) + ) + ) seeded_sites = _get_seeded_sites(seed_log, (:), show_idx) site_alpha = 1.0 @@ -428,7 +454,9 @@ function ADRIA.viz.explore(rs::ResultSet) obs_site_highlight[] = (:lightgreen, site_alpha) # Update scenario density - scen_dist = dropdims(mean(tac_scens(timesteps=timespan), dims=:timesteps), dims=:timesteps) + scen_dist = dropdims( + mean(tac_scens(; timesteps=timespan); dims=:timesteps); dims=:timesteps + ) # Hide scenarios that were filtered out cf_dist = scen_dist[show_idx .& scen_groups[:counterfactual]] ug_dist = scen_dist[show_idx .& scen_groups[:unguided]] @@ -469,10 +497,18 @@ function ADRIA.viz.explore(rs::ResultSet) if count(show_idx) > 16 mean_tac_med = relative_sensitivities(X[show_idx, :], scen_dist[show_idx])[interv_idx] - sel_asv_scens = dropdims(mean(asv_scens(timesteps=timespan)[scenarios=show_idx], dims=:timesteps), dims=:timesteps) + sel_asv_scens = dropdims( + mean(asv_scens(; timesteps=timespan)[scenarios=show_idx]; dims=:timesteps); + dims=:timesteps + ) mean_asv_med = relative_sensitivities(X[show_idx, :], sel_asv_scens)[interv_idx] - sel_juves_scens = dropdims(mean(juves_scens(timesteps=timespan)[scenarios=show_idx], dims=:timesteps), dims=:timesteps) + sel_juves_scens = dropdims( + mean( + juves_scens(; timesteps=timespan)[scenarios=show_idx]; dims=:timesteps + ); + dims=:timesteps + ) mean_juves_med = relative_sensitivities(X[show_idx, :], sel_juves_scens)[interv_idx] else # Display nothing if no data is available @@ -488,7 +524,7 @@ function ADRIA.viz.explore(rs::ResultSet) # Update bar plot of outcome probability probas[] = outcome_probability(scen_dist[show_idx]) - ylims!(layout.outcomes, minimum(probas[].values), maximum(probas[].values)) + return ylims!(layout.outcomes, minimum(probas[].values), maximum(probas[].values)) # Clear loading animation # remove_loader(traj_display[2, 1], loader_anim_display) @@ -499,7 +535,8 @@ function ADRIA.viz.explore(rs::ResultSet) # up_timer = Timer(x -> x, 0.25) onany(time_slider.interval, tac_slider.interval, [t.active for t in t_toggles]..., - [sld.interval for sld in interv_sliders]...) do time_val, tac_val, rcp45, rcp60, rcp85, c_tog, u_tog, g_tog, intervs... # i1_val, i2_val, i3_val, i4_val, i5_val, i6_val, i7_val, i8_val, i9_val, i10_val, i11_val, i12_val + [sld.interval for sld in interv_sliders]... + ) do time_val, tac_val, rcp45, rcp60, rcp85, c_tog, u_tog, g_tog, intervs... 
# i1_val, i2_val, i3_val, i4_val, i5_val, i6_val, i7_val, i8_val, i9_val, i10_val, i11_val, i12_val # Update slider labels left_year_val[] = "$(Int(floor(time_val[1])))" @@ -510,21 +547,25 @@ function ADRIA.viz.explore(rs::ResultSet) if @isdefined up_timer close(up_timer) end - up_timer = Timer(x -> update_disp(time_val, tac_val, rcp45, rcp60, rcp85, c_tog, u_tog, g_tog, intervs...), 2) + up_timer = Timer( + x -> update_disp( + time_val, tac_val, rcp45, rcp60, rcp85, c_tog, u_tog, g_tog, intervs... + ), + 2 + ) end @info "Displaying UI" gl_screen = display(f) # DataInspector() - wait(gl_screen) + return wait(gl_screen) # close(up_timer) end function ADRIA.viz.explore(rs_path::String) return ADRIA.viz.explore(load_results(rs_path)) end - # function explore(rs::ADRIA.ResultSet) # layout = modeler_layout(size=(1920, 1080)) @@ -623,7 +664,6 @@ end # in_pcp_data = normalize(Matrix(rs.inputs[:, input_names])) # # in_pcp_lines = Observable(in_pcp_data) - # # Get mean outcomes for each scenario # outcome_pcp_data = hcat([ # mean_tac_outcomes, @@ -757,7 +797,6 @@ end end # module - # Allow use from terminal if this file is run directly # if abspath(PROGRAM_FILE) == @__FILE__ # if "explore" in ARGS diff --git a/ext/AvizExt/analysis.jl b/ext/AvizExt/analysis.jl index ebb08506e..eaf7ff987 100644 --- a/ext/AvizExt/analysis.jl +++ b/ext/AvizExt/analysis.jl @@ -1,9 +1,10 @@ using ADRIA.analysis: col_normalize using ADRIA.sensitivity - -function relative_sensitivities(X, y::AbstractArray{<:Real}; S=10, stat=:median)::Vector{Float64} - return col_normalize(sensitivity.pawn(X, y; S=S)(Si=stat)) +function relative_sensitivities( + X, y::AbstractArray{<:Real}; S=10, stat=:median +)::Vector{Float64} + return col_normalize(sensitivity.pawn(X, y; S=S)(; Si=stat)) end """ @@ -22,6 +23,12 @@ function outcome_probability(data::AbstractVector)::NamedTuple count((p_outcomes .>= 0.50) .& (p_outcomes .<= 0.70)) / n, count((p_outcomes .> 0.20) .& (p_outcomes .< 0.50)) / n, count(p_outcomes .< 0.20) / n], - labels=["Very High\n> 80%", "High\n70 - 80%", "Medium\n50 - 70%", "Low\n20 - 50%", "Very Low\n< 20%"] + labels=[ + "Very High\n> 80%", + "High\n70 - 80%", + "Medium\n50 - 70%", + "Low\n20 - 50%", + "Very Low\n< 20%" + ] ) end diff --git a/ext/AvizExt/layout.jl b/ext/AvizExt/layout.jl index a9798d0b6..59ba779d2 100644 --- a/ext/AvizExt/layout.jl +++ b/ext/AvizExt/layout.jl @@ -22,7 +22,7 @@ └─────┘ └────────────────┘ └────────────────────────────────────────┘ """ function comms_layout(; size=(1920, 1080)) - f = Figure(size=size) + f = Figure(; size=size) main = f[1:6, 1:9] = GridLayout() @@ -32,7 +32,7 @@ function comms_layout(; size=(1920, 1080)) # Trajectories and density plot trajectory = main[1:2, 3:8] = GridLayout() temporal = Axis( - trajectory[1, 2:7], + trajectory[1, 2:7]; title="Scenario Trajectories", xlabel="Year", ylabel="Mean TAC (m²)" @@ -64,11 +64,17 @@ function comms_layout(; size=(1920, 1080)) # Outcome probabilities outcome_view = main[3:5, 8:9] outcomes = Axis( - outcome_view, + outcome_view; title="Probability Occurrence", xlabel="Outcomes", xticks=([1, 2, 3, 4, 5], - ["Very High\n> 80%", "High\n70 - 80%", "Medium\n50 - 70%", "Low\n20 - 50%", "Very Low\n< 20%"]) + [ + "Very High\n> 80%", + "High\n70 - 80%", + "Medium\n50 - 70%", + "Low\n20 - 50%", + "Very Low\n< 20%" + ]) ) messages = Axis(main[6, 5:9]) @@ -76,7 +82,7 @@ function comms_layout(; size=(1920, 1080)) hidespines!(messages) text!(messages, 0.0, - 0.5, + 0.5; text="Zoom: Mouse wheel\nPan: Hold right-click\nReset view: Ctrl + Left-click", 
align=(:left, :center), justification=:left, @@ -84,7 +90,9 @@ function comms_layout(; size=(1920, 1080)) return (figure=f, controls=controls, - trajectory=(temporal=temporal, outcome_slider=traj_outcome_sld, time_slider=traj_time_sld), + trajectory=( + temporal=temporal, outcome_slider=traj_outcome_sld, time_slider=traj_time_sld + ), scen_hist=scen_hist, map=map[1, 1], importance=feat_importance, @@ -92,8 +100,6 @@ function comms_layout(; size=(1920, 1080)) messages) end - - """ ┌──┐ ┌─────────────────────────┐ ┌─────┐ ┌──────────────┐ │ │ │ │ │ H │ │ │ @@ -119,7 +125,7 @@ end └────────────────────────────┘ └───────────────────┘ """ function modeler_layout(; size=(1920, 1080)) - f = Figure(size=size) + f = Figure(; size=size) # controls = f[1:3, 1] = GridLayout() main = f[1:3, 1:6] = GridLayout() @@ -130,7 +136,7 @@ function modeler_layout(; size=(1920, 1080)) traj_outcome_sld = spatial_temporal[1, 1] temporal = Axis( - spatial_temporal[1, 2:4], + spatial_temporal[1, 2:4]; title="Scenario Trajectories", xlabel="Year", ylabel="Mean TAC (m²)" @@ -148,26 +154,27 @@ function modeler_layout(; size=(1920, 1080)) spatial = spatial_temporal[1, 6] interv_pcp = Axis( - main[2, 1:6], + main[2, 1:6]; title="Interventions" ) outcomes = main[3, 1:6] = GridLayout() pairplot = outcomes[1:3, 1:3] = GridLayout() outcome_pcp = Axis( - outcomes[1:3, 4:6], + outcomes[1:3, 4:6]; title="Outcomes" ) return (figure=f, # controls=controls, - trajectory=(temporal=temporal, outcome_slider=traj_outcome_sld, time_slider=traj_time_sld), + trajectory=( + temporal=temporal, outcome_slider=traj_outcome_sld, time_slider=traj_time_sld + ), scen_hist=scen_hist, map=spatial, interv_pcp=interv_pcp, pairplot=pairplot, outcome_pcp=outcome_pcp) end - # """ # ┌─────────────┐ ┌─────────────────────┐ ┌────────┐ # │ │ │ │ │ │ diff --git a/ext/AvizExt/plotting.jl b/ext/AvizExt/plotting.jl index 86eebdacb..b8722beeb 100644 --- a/ext/AvizExt/plotting.jl +++ b/ext/AvizExt/plotting.jl @@ -1,6 +1,5 @@ using ADRIA.analysis: col_normalize - function pairplot!(display, outcomes::NamedTuple) n_outcomes = length(outcomes) for (row, (r_m, r_out)) in zip(1:n_outcomes, pairs(outcomes)) # rows @@ -13,10 +12,10 @@ function pairplot!(display, outcomes::NamedTuple) if 1 <= col <= n_outcomes if row == 1 & col != 2 # show y-axis on second plot on first row - hideydecorations!(t, grid=false, ticks=false) + hideydecorations!(t; grid=false, ticks=false) elseif row > 1 && col > 1 # show y-axis only in first column - hideydecorations!(t, grid=false, ticks=false) + hideydecorations!(t; grid=false, ticks=false) end end @@ -25,10 +24,10 @@ function pairplot!(display, outcomes::NamedTuple) # and second-last row of the final column if col == n_outcomes if row != n_outcomes - 1 - hidexdecorations!(t, grid=false, ticks=false) + hidexdecorations!(t; grid=false, ticks=false) end else - hidexdecorations!(t, grid=false, ticks=false) + hidexdecorations!(t; grid=false, ticks=false) end end @@ -37,7 +36,7 @@ function pairplot!(display, outcomes::NamedTuple) end if r_m == c_m - hidedecorations!(t, label=false) + hidedecorations!(t; label=false) hidespines!(t) continue end @@ -47,7 +46,6 @@ function pairplot!(display, outcomes::NamedTuple) end end - function pairplot!(display, data, names) n_outcomes = size(data, 2) @@ -65,11 +63,11 @@ function pairplot!(display, data, names) if row == 1 && col != 2 # show y-axis on second plot on first row if col > 1 - hideydecorations!(t, grid=false, ticks=false) + hideydecorations!(t; grid=false, ticks=false) end elseif row > 1 && col > 
1 # show y-axis only in first column - hideydecorations!(t, grid=false, ticks=false) + hideydecorations!(t; grid=false, ticks=false) end if row < n_outcomes @@ -77,10 +75,10 @@ function pairplot!(display, data, names) # and second-last row of the final column if col == n_outcomes if row != n_outcomes - 1 - hidexdecorations!(t, grid=false, ticks=false) + hidexdecorations!(t; grid=false, ticks=false) end else - hidexdecorations!(t, grid=false, ticks=false) + hidexdecorations!(t; grid=false, ticks=false) end end @@ -89,17 +87,17 @@ function pairplot!(display, data, names) end if r_m == c_m - hidedecorations!(t, label=false) + hidedecorations!(t; label=false) hidespines!(t) continue end - scatter!(t, vec(c_out), vec(r_out), markersize=1) + scatter!(t, vec(c_out), vec(r_out); markersize=1) end end rowgap!(display, 20) - colgap!(display, 20) + return colgap!(display, 20) end """ @@ -156,7 +154,7 @@ function pcp!(ax, data, names::Union{Vector,Tuple}; color=(:blue, 0.1)) n = size(data, 2) vlines!(ax, 1:n; color=:black) - series!(ax, 1:n, data, color=color) + series!(ax, 1:n, data; color=color) ax.xticks = (1:n, [string.(names)...]) ax.xticklabelrotation = 0.45 diff --git a/ext/AvizExt/theme.jl b/ext/AvizExt/theme.jl index 29af1f312..897de7b4f 100644 --- a/ext/AvizExt/theme.jl +++ b/ext/AvizExt/theme.jl @@ -12,7 +12,7 @@ const COLORS::Dict{Symbol,Union{Symbol,String}} = Dict( :non_target => "#ff7f00", :order => :dodgerblue, :topsis => :deepskyblue4, - :vikor => :midnightblue, + :vikor => :midnightblue ) function colors( @@ -28,7 +28,7 @@ function colors( end function colors( scen_groups::Dict{Symbol,BitVector}, - weight::Float64, + weight::Float64 )::Vector{Tuple{Symbol,Float64}} groups = collect(keys(scen_groups)) n_scens = length(scen_groups[groups[1]]) @@ -52,7 +52,7 @@ function colors( scen_colors[group].r, scen_colors[group].g, scen_colors[group].b, - weights[group], + weights[group] ) for group in groups ) end @@ -81,7 +81,7 @@ function scenario_colors!( scen_types::NamedTuple, weight::Float64, hide::BitVector, - guide_toggle_map, + guide_toggle_map ) color_map .= obs_color[] for (t, l, c) in guide_toggle_map diff --git a/ext/AvizExt/viz/clustering.jl b/ext/AvizExt/viz/clustering.jl index aba52d09e..79923269a 100644 --- a/ext/AvizExt/viz/clustering.jl +++ b/ext/AvizExt/viz/clustering.jl @@ -1,193 +1,193 @@ -using Statistics - -""" - scenarios(outcomes::AbstractMatrix, clusters::Vector{Int64}; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), fig_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) - scenarios!(g::Union{GridLayout,GridPosition}, outcomes::AbstractMatrix, clusters::Vector{Int64}; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) - -Visualize clustered time series of scenarios. - -# Arguments -- `outcomes` : AbstractMatrix of outcomes for several scenarios or sites -- `clusters` : Vector of numbers corresponding to clusters -- `opts` : Aviz options - - `summarize` : plot confidence interval. Defaults to true - -# Returns -Figure -""" -function ADRIA.viz.scenarios( - outcomes::AbstractMatrix{<:Real}, - clusters::Union{BitVector,AbstractVector{Int64}}; - opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - fig_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), -)::Figure - f = Figure(; fig_opts...) 
- g = f[1, 1] = GridLayout() - - ADRIA.viz.scenarios!(g, outcomes, clusters; opts=opts, axis_opts=axis_opts) - - return f -end -function ADRIA.viz.scenarios!( - g::Union{GridLayout,GridPosition}, - outcomes::AbstractMatrix{<:Real}, - clusters::Union{BitVector,Vector{Int64}}; - opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - series_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), -)::Union{GridLayout,GridPosition} - # Ensure last year is always shown in x-axis - xtick_vals = get(axis_opts, :xticks, _time_labels(timesteps(outcomes))) - xtick_rot = get(axis_opts, :xticklabelrotation, 2 / π) - ax = Axis(g[1, 1]; xticks=xtick_vals, xticklabelrotation=xtick_rot, axis_opts...) - - scen_groups = ADRIA.analysis.scenario_clusters(clusters) - opts[:histogram] = false - opts[:legend_labels] = sort(collect(keys(scen_groups))) - - return ADRIA.viz.scenarios!( - g, - ax, - outcomes, - scen_groups; - opts=opts, - axis_opts=axis_opts, - series_opts=series_opts, - ) -end - -""" - clustered_scenarios(outcomes::AbstractMatrix{<:Real}, clusters::Vector{Int64}; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), fig_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) - clustered_scenarios!(g::Union{GridLayout,GridPosition}, outcomes::AbstractMatrix{<:Real}, clusters::Vector{Int64}; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) - -Visualize clustered time series of scenarios. - -# Arguments -- `outcomes` : Matrix of outcomes for several scenarios or sites -- `clusters` : Vector of numbers corresponding to clusters -- `opts` : Aviz options - - `summarize` : plot confidence interval. Defaults to true - -# Returns -Figure -""" -function ADRIA.viz.clustered_scenarios( - outcomes::AbstractMatrix{<:Real}, - clusters::Union{BitVector,Vector{Int64}}; - opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - fig_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), -)::Figure - return ADRIA.viz.scenarios( - outcomes, clusters; opts=opts, fig_opts=fig_opts, axis_opts=axis_opts - ) -end -function ADRIA.viz.clustered_scenarios!( - g::Union{GridLayout,GridPosition}, - outcomes::AbstractMatrix{<:Real}, - clusters::Union{BitVector,Vector{Int64}}; - opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), -)::Union{GridLayout,GridPosition} - return ADRIA.viz.scenarios!(g, outcomes, clusters; axis_opts=axis_opts, opts=opts) -end - -""" - map(rs::Union{Domain,ResultSet}, data::AbstractMatrix, clusters::AbstractVector{Int64}; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), fig_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) - map(g, rs, data, clusters; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) - -Visualize clustered time series for each site and map. - -# Arguments -- `rs` : ResultSet -- `data` : Vector of summary statistics data for each location -- `clusters` : Vector of numbers corresponding to clusters -- `opts` : Options specific to this plotting method - - `highlight` : Vector of colors indicating cluster membership for each location. - - `summary` : function (which must support the `dims` keyword) to summarize data with. 
- Default: `mean` - -# Returns -Figure -""" -function ADRIA.viz.map( - rs::Union{Domain,ResultSet}, - data::AbstractArray{<:Real}, - clusters::Union{BitVector,AbstractVector{Int64}}; - opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - fig_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), -)::Figure - f = Figure(; fig_opts...) - g = f[1, 1] = GridLayout() - ADRIA.viz.map!(g, rs, data, clusters; opts=opts, axis_opts=axis_opts) - - return f -end -function ADRIA.viz.map!( - g::Union{GridLayout,GridPosition}, - rs::Union{Domain,ResultSet}, - data::AbstractVector{<:Real}, - clusters::Union{BitVector,Vector{Int64}}; - opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), -)::Union{GridLayout,GridPosition} - # Although this function is called scenario_clusters, here we have locations clusters - loc_groups::Dict{Symbol,BitVector} = ADRIA.analysis.scenario_clusters(clusters) - group_colors::Dict{Symbol,Union{Symbol,RGBA{Float32}}} = colors(loc_groups) - - legend_params::Tuple = _cluster_legend_params(data, loc_groups, group_colors) - - _colors::Vector{Union{Symbol,RGBA{Float32}}} = Vector{Union{Symbol,RGBA{Float32}}}( - undef, length(clusters) - ) - for (idx, filt) in loc_groups - _colors[filt] .= group_colors[idx] - end - - # Highlight is a vector of stroke colors for each location - opts[:highlight] = get(opts, :highlight, _colors) - opts[:legend_params] = get(opts, :legend_params, legend_params) - - ADRIA.viz.map!(g, rs, data; opts=opts, axis_opts=axis_opts) - - return g -end - -""" - _cluster_legend_params(data::AbstractVector{<:Real}, scen_groups::Dict{Symbol,BitVector}, group_colors::Dict{Symbol,Union{Symbol,RGBA{Float32}}})::Tuple - -Color parameter for current cluster weighted by number of scenarios. - -# Arguments -- `data` : Vector of some metric outcome for each site -- `loc_groups` : Dictionary of (group_names => filter), where filter is a BitVector to -select locations that belong to each group -- `group_colors` : Dictionary of (group_names => colors), where colors can be Symbols or -RGBA{Float32} - -# Returns -Tuple of legend params to be passed to map! containing legend_entries, legend_labels and -legend_title (in that order). -""" -function _cluster_legend_params( - data::AbstractVector{<:Real}, - loc_groups::Dict{Symbol,BitVector}, - group_colors::Dict{Symbol,Union{Symbol,RGBA{Float32}}}, -)::Tuple - group_keys = sort(collect(keys(group_colors))) - colors = [group_colors[key] for key in group_keys] - legend_entries = [PolyElement(; color=c, strokecolor=:transparent) for c in colors] - - label_means::Vector{Float64} = zeros(length(group_keys)) - for (idx_key, key) in enumerate(group_keys) - label_means[idx_key] = mean(data[loc_groups[key]]) - end - - legend_labels = - labels(group_keys) .* ": " .* ADRIA.to_scientific.(label_means, digits=2) - legend_title = "Cluster mean" - - return (legend_entries, legend_labels, legend_title) -end +using Statistics + +""" + scenarios(outcomes::AbstractMatrix, clusters::Vector{Int64}; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), fig_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) + scenarios!(g::Union{GridLayout,GridPosition}, outcomes::AbstractMatrix, clusters::Vector{Int64}; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) + +Visualize clustered time series of scenarios. 
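+
+A minimal sketch, assuming a loaded `ResultSet` `rs`; `cluster_scenarios` is an
+assumption about the clustering helper in `src/analysis/clustering.jl`, which this
+patch touches but does not show:
+
+```julia
+tac = ADRIA.metrics.scenario_total_cover(rs)
+clusters = ADRIA.analysis.cluster_scenarios(tac, 4)  # assumed clustering helper
+ADRIA.viz.scenarios(tac, clusters)
+```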
+ +# Arguments +- `outcomes` : AbstractMatrix of outcomes for several scenarios or sites +- `clusters` : Vector of numbers corresponding to clusters +- `opts` : Aviz options + - `summarize` : plot confidence interval. Defaults to true + +# Returns +Figure +""" +function ADRIA.viz.scenarios( + outcomes::AbstractMatrix{<:Real}, + clusters::Union{BitVector,AbstractVector{Int64}}; + opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + fig_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE() +)::Figure + f = Figure(; fig_opts...) + g = f[1, 1] = GridLayout() + + ADRIA.viz.scenarios!(g, outcomes, clusters; opts=opts, axis_opts=axis_opts) + + return f +end +function ADRIA.viz.scenarios!( + g::Union{GridLayout,GridPosition}, + outcomes::AbstractMatrix{<:Real}, + clusters::Union{BitVector,Vector{Int64}}; + opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + series_opts::OPT_TYPE=DEFAULT_OPT_TYPE() +)::Union{GridLayout,GridPosition} + # Ensure last year is always shown in x-axis + xtick_vals = get(axis_opts, :xticks, _time_labels(timesteps(outcomes))) + xtick_rot = get(axis_opts, :xticklabelrotation, 2 / π) + ax = Axis(g[1, 1]; xticks=xtick_vals, xticklabelrotation=xtick_rot, axis_opts...) + + scen_groups = ADRIA.analysis.scenario_clusters(clusters) + opts[:histogram] = false + opts[:legend_labels] = sort(collect(keys(scen_groups))) + + return ADRIA.viz.scenarios!( + g, + ax, + outcomes, + scen_groups; + opts=opts, + axis_opts=axis_opts, + series_opts=series_opts + ) +end + +""" + clustered_scenarios(outcomes::AbstractMatrix{<:Real}, clusters::Vector{Int64}; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), fig_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) + clustered_scenarios!(g::Union{GridLayout,GridPosition}, outcomes::AbstractMatrix{<:Real}, clusters::Vector{Int64}; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) + +Visualize clustered time series of scenarios. + +# Arguments +- `outcomes` : Matrix of outcomes for several scenarios or sites +- `clusters` : Vector of numbers corresponding to clusters +- `opts` : Aviz options + - `summarize` : plot confidence interval. Defaults to true + +# Returns +Figure +""" +function ADRIA.viz.clustered_scenarios( + outcomes::AbstractMatrix{<:Real}, + clusters::Union{BitVector,Vector{Int64}}; + opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + fig_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE() +)::Figure + return ADRIA.viz.scenarios( + outcomes, clusters; opts=opts, fig_opts=fig_opts, axis_opts=axis_opts + ) +end +function ADRIA.viz.clustered_scenarios!( + g::Union{GridLayout,GridPosition}, + outcomes::AbstractMatrix{<:Real}, + clusters::Union{BitVector,Vector{Int64}}; + opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE() +)::Union{GridLayout,GridPosition} + return ADRIA.viz.scenarios!(g, outcomes, clusters; axis_opts=axis_opts, opts=opts) +end + +""" + map(rs::Union{Domain,ResultSet}, data::AbstractMatrix, clusters::AbstractVector{Int64}; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), fig_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) + map(g, rs, data, clusters; opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}(), axis_opts::Dict{Symbol,<:Any}=Dict{Symbol,Any}()) + +Visualize clustered time series for each site and map. 
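+
+A minimal sketch, assuming a loaded `ResultSet` `rs`; the median split below is a
+stand-in for a real location clustering:
+
+```julia
+using Statistics
+
+rc = ADRIA.metrics.relative_cover(rs)
+loc_mean = vec(mean(rc; dims=(:timesteps, :scenarios)))  # one summary value per location
+loc_clusters = Int64.(loc_mean .>= median(loc_mean)) .+ 1  # two stand-in clusters
+ADRIA.viz.map(rs, loc_mean, loc_clusters)
+```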
+
+# Arguments
+- `rs` : ResultSet
+- `data` : Vector of summary statistics data for each location
+- `clusters` : Vector of numbers corresponding to clusters
+- `opts` : Options specific to this plotting method
+    - `highlight` : Vector of colors indicating cluster membership for each location.
+    - `summary` : function (which must support the `dims` keyword) to summarize data with.
+      Default: `mean`
+
+# Returns
+Figure
+"""
+function ADRIA.viz.map(
+    rs::Union{Domain,ResultSet},
+    data::AbstractArray{<:Real},
+    clusters::Union{BitVector,AbstractVector{Int64}};
+    opts::OPT_TYPE=DEFAULT_OPT_TYPE(),
+    fig_opts::OPT_TYPE=DEFAULT_OPT_TYPE(),
+    axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE()
+)::Figure
+    f = Figure(; fig_opts...)
+    g = f[1, 1] = GridLayout()
+    ADRIA.viz.map!(g, rs, data, clusters; opts=opts, axis_opts=axis_opts)
+
+    return f
+end
+function ADRIA.viz.map!(
+    g::Union{GridLayout,GridPosition},
+    rs::Union{Domain,ResultSet},
+    data::AbstractVector{<:Real},
+    clusters::Union{BitVector,Vector{Int64}};
+    opts::OPT_TYPE=DEFAULT_OPT_TYPE(),
+    axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE()
+)::Union{GridLayout,GridPosition}
+    # Although the helper is named `scenario_clusters`, here it groups location clusters
+    loc_groups::Dict{Symbol,BitVector} = ADRIA.analysis.scenario_clusters(clusters)
+    group_colors::Dict{Symbol,Union{Symbol,RGBA{Float32}}} = colors(loc_groups)
+
+    legend_params::Tuple = _cluster_legend_params(data, loc_groups, group_colors)
+
+    _colors::Vector{Union{Symbol,RGBA{Float32}}} = Vector{Union{Symbol,RGBA{Float32}}}(
+        undef, length(clusters)
+    )
+    for (idx, filt) in loc_groups
+        _colors[filt] .= group_colors[idx]
+    end
+
+    # Highlight is a vector of stroke colors for each location
+    opts[:highlight] = get(opts, :highlight, _colors)
+    opts[:legend_params] = get(opts, :legend_params, legend_params)
+
+    ADRIA.viz.map!(g, rs, data; opts=opts, axis_opts=axis_opts)
+
+    return g
+end
+
+"""
+    _cluster_legend_params(data::AbstractVector{<:Real}, loc_groups::Dict{Symbol,BitVector}, group_colors::Dict{Symbol,Union{Symbol,RGBA{Float32}}})::Tuple
+
+Build legend parameters for the location clusters, labelling each cluster with its mean
+value of `data`.
+
+# Arguments
+- `data` : Vector of some metric outcome for each site
+- `loc_groups` : Dictionary of (group_names => filter), where filter is a BitVector to
+select locations that belong to each group
+- `group_colors` : Dictionary of (group_names => colors), where colors can be Symbols or
+RGBA{Float32}
+
+# Returns
+Tuple of legend params to be passed to map! containing legend_entries, legend_labels and
+legend_title (in that order).
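+
+For example (mirroring its use in `ADRIA.viz.map!` above, where the result seeds
+`opts[:legend_params]`):
+
+```julia
+opts[:legend_params] = _cluster_legend_params(data, loc_groups, group_colors)
+```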
+""" +function _cluster_legend_params( + data::AbstractVector{<:Real}, + loc_groups::Dict{Symbol,BitVector}, + group_colors::Dict{Symbol,Union{Symbol,RGBA{Float32}}} +)::Tuple + group_keys = sort(collect(keys(group_colors))) + colors = [group_colors[key] for key in group_keys] + legend_entries = [PolyElement(; color=c, strokecolor=:transparent) for c in colors] + + label_means::Vector{Float64} = zeros(length(group_keys)) + for (idx_key, key) in enumerate(group_keys) + label_means[idx_key] = mean(data[loc_groups[key]]) + end + + legend_labels = + labels(group_keys) .* ": " .* ADRIA.to_scientific.(label_means, digits=2) + legend_title = "Cluster mean" + + return (legend_entries, legend_labels, legend_title) +end diff --git a/ext/AvizExt/viz/environment/cyclones.jl b/ext/AvizExt/viz/environment/cyclones.jl index 472cf2f17..e22d44b0d 100644 --- a/ext/AvizExt/viz/environment/cyclones.jl +++ b/ext/AvizExt/viz/environment/cyclones.jl @@ -1,12 +1,13 @@ using Statistics - - -function ADRIA.viz.cyclone_scenario(dom::Domain, scen_id::Int64; fig_opts=Dict(), axis_opts=Dict()) +function ADRIA.viz.cyclone_scenario( + dom::Domain, scen_id::Int64; fig_opts=Dict(), axis_opts=Dict() +) taxa_mean = dropdims( - mean(dom.cyclone_mortality_scens[:, :, :, scen_id] .* 100.0, dims=:species), dims=:species + mean(dom.cyclone_mortality_scens[:, :, :, scen_id] .* 100.0; dims=:species); + dims=:species ) - mean_cyc_scen = dropdims(mean(taxa_mean, dims=:locations), dims=:locations) + mean_cyc_scen = dropdims(mean(taxa_mean; dims=:locations); dims=:locations) fig_opts[:size] = get(axis_opts, :size, (800, 400)) f = Figure(; fig_opts...) @@ -28,4 +29,4 @@ function ADRIA.viz.cyclone_scenario(dom::Domain, scen_id::Int64; fig_opts=Dict() ) return f -end \ No newline at end of file +end diff --git a/ext/AvizExt/viz/environment/dhw.jl b/ext/AvizExt/viz/environment/dhw.jl index 668659930..6a805ec3c 100644 --- a/ext/AvizExt/viz/environment/dhw.jl +++ b/ext/AvizExt/viz/environment/dhw.jl @@ -1,10 +1,10 @@ using Statistics - - -function ADRIA.viz.dhw_scenario(dom::Domain, scen_id::Int64; fig_opts=Dict(), axis_opts=Dict()) +function ADRIA.viz.dhw_scenario( + dom::Domain, scen_id::Int64; fig_opts=Dict(), axis_opts=Dict() +) loc_scens = dom.dhw_scens[:, :, scen_id] - mean_dhw_scen = dropdims(mean(loc_scens, dims=2), dims=2) + mean_dhw_scen = dropdims(mean(loc_scens; dims=2); dims=2) ts = dom.env_layer_md.timeframe[1]:dom.env_layer_md.timeframe[end] @@ -28,4 +28,4 @@ function ADRIA.viz.dhw_scenario(dom::Domain, scen_id::Int64; fig_opts=Dict(), ax ) return f -end \ No newline at end of file +end diff --git a/ext/AvizExt/viz/location_selection.jl b/ext/AvizExt/viz/location_selection.jl index dc90ecddf..96060b777 100644 --- a/ext/AvizExt/viz/location_selection.jl +++ b/ext/AvizExt/viz/location_selection.jl @@ -58,7 +58,7 @@ function ADRIA.viz.ranks_to_frequencies!( rs, frequencies[ranks=rank_ids[1]]; opts=opts, - axis_opts=axis_opts, + axis_opts=axis_opts ) legend_els[1] = PolyElement(; color=all_colormaps[Symbol(rank_ids[1])][2], strokecolor=:grey, strokewidth=1 @@ -75,7 +75,7 @@ function ADRIA.viz.ranks_to_frequencies!( strokecolor=:grey, strokewidth=0.5, linestyle=:solid, - overdraw=true, + overdraw=true ) legend_els[rr] = PolyElement(; color=all_colormaps[Symbol(rr)][2], strokecolor=:grey, strokewidth=1 @@ -97,7 +97,7 @@ function ADRIA.viz.ranks_to_frequencies!( opts[:color_map] = get( opts, :color_map, - [RGBA{Float32}(1.0, 1.0, 1.0, 1.0), RGBA{Float32}(0.00784314, 0.243137, 1.0, 1.0)], + [RGBA{Float32}(1.0, 1.0, 1.0, 1.0), 
RGBA{Float32}(0.00784314, 0.243137, 1.0, 1.0)] ) return ADRIA.viz.map!( @@ -105,7 +105,7 @@ function ADRIA.viz.ranks_to_frequencies!( rs, frequencies[ranks=rank_id].data; opts=opts, - axis_opts=axis_opts, + axis_opts=axis_opts ) end function ADRIA.viz.ranks_to_frequencies( @@ -124,7 +124,7 @@ function ADRIA.viz.ranks_to_frequencies( frequencies, rank_ids; opts=opts, - axis_opts=axis_opts, + axis_opts=axis_opts ) return f diff --git a/ext/AvizExt/viz/rule_extraction.jl b/ext/AvizExt/viz/rule_extraction.jl index a71236142..9e5c83283 100644 --- a/ext/AvizExt/viz/rule_extraction.jl +++ b/ext/AvizExt/viz/rule_extraction.jl @@ -62,7 +62,7 @@ function ADRIA.viz.rules_scatter( rules; opts=opts, fig_opts=fig_opts, - axis_opts=axis_opts, + axis_opts=axis_opts ) end function ADRIA.viz.rules_scatter!( @@ -98,13 +98,13 @@ function ADRIA.viz.rules_scatter!( feature_names = _feature_names(fieldnames, spec) ax::Axis = Axis( - sub_g[r, c], + sub_g[r, c]; xlabel=feature_names[1], ylabel=feature_names[2], title=_readable_condition(condition, feature_names), titlesize=title_size, xlabelsize=labels_size, - ylabelsize=labels_size; + ylabelsize=labels_size, axis_opts... ) @@ -117,10 +117,10 @@ function ADRIA.viz.rules_scatter!( xlims!(x_min, x_max), ylims!(y_min, y_max) for c in unique(clusters) - x = x_features[c.==clusters] - y = y_features[c.==clusters] + x = x_features[c .== clusters] + y = y_features[c .== clusters] cat_color = c == 1 ? colors[1] : colors[2] - scatter!(ax, x, y, color=cat_color, marker=marker, markersize=4) + scatter!(ax, x, y; color=cat_color, marker=marker, markersize=4) end if get(opts, :target_area, true) @@ -130,38 +130,38 @@ function ADRIA.viz.rules_scatter!( end # Create Legend - markers = MarkerElement[MarkerElement(color=_c, marker=marker) for _c in colors] + markers = MarkerElement[MarkerElement(; color=_c, marker=marker) for _c in colors] Legend( - sub_g[1, n_cols+1], + sub_g[1, n_cols + 1], markers, labels, - "Clusters", + "Clusters"; halign=:left, valign=:top, margin=(5, 5, 5, 5) ) - g + return g end function _highlight_target_area(ax::Axis, condition::Vector{Vector}, scenarios::DataFrame) # Draw lines at clause breakpoints - vlines!(ax, [last(condition[1])], color=(:black, 0.4)) - hlines!(ax, [last(condition[2])], color=(:black, 0.4)) + vlines!(ax, [last(condition[1])]; color=(:black, 0.4)) + hlines!(ax, [last(condition[2])]; color=(:black, 0.4)) # Highlight target area - poly!(ax, _target_area(scenarios, condition), color=(:black, 0.08)) + return poly!(ax, _target_area(scenarios, condition); color=(:black, 0.08)) end function _find_limits(features::Vector{<:Real}) delta = (maximum(features) - minimum(features)) * 0.1 - [minimum(features) - delta, maximum(features) + delta] + return [minimum(features) - delta, maximum(features) + delta] end function _readable_condition(condition::Vector{Vector}, feature_names::Vector{String}) inequalities = [c[2] == :L ? " < " : " ≥ " for c in condition] values = string.([round(c[3]; digits=2) for c in condition]) - join(feature_names .* inequalities .* values, "\n") + return join(feature_names .* inequalities .* values, "\n") end function _target_area(scenarios::DataFrame, condition::Vector{Vector}) @@ -172,9 +172,9 @@ function _target_area(scenarios::DataFrame, condition::Vector{Vector}) x, y = [c[2] == :L ? l[1] : c[3] for (c, l) in zip(condition, lims)] w, h = [c[2] == :L ? 
c[3] - l[1] : l[2] - c[3] for (c, l) in zip(condition, lims)] - Rect(x, y, w, h) + return Rect(x, y, w, h) end function _feature_names(fieldnames::Vector{String}, spec::DataFrame)::Vector{String} - return [spec[spec.fieldname.==f, :name][1] for f in Symbol.(fieldnames)] + return [spec[spec.fieldname .== f, :name][1] for f in Symbol.(fieldnames)] end diff --git a/ext/AvizExt/viz/scenarios.jl b/ext/AvizExt/viz/scenarios.jl index 0e7c9e203..9ae3024bf 100644 --- a/ext/AvizExt/viz/scenarios.jl +++ b/ext/AvizExt/viz/scenarios.jl @@ -53,7 +53,7 @@ function ADRIA.viz.scenarios( opts=opts, fig_opts=fig_opts, axis_opts=axis_opts, - series_opts=series_opts, + series_opts=series_opts ) end function ADRIA.viz.scenarios!( @@ -72,7 +72,7 @@ function ADRIA.viz.scenarios!( outcomes; opts=opts, axis_opts=axis_opts, - series_opts=series_opts, + series_opts=series_opts ) end function ADRIA.viz.scenarios( @@ -131,7 +131,7 @@ function ADRIA.viz.scenarios!( xtick_rot = get(axis_opts, :xticklabelrotation, 2 / π) ax = Axis(g[1, 1]; xticks=xtick_vals, xticklabelrotation=xtick_rot, axis_opts...) - _scenarios = copy(scenarios[1:end.∈[outcomes.scenarios], :]) + _scenarios = copy(scenarios[1:end .∈ [outcomes.scenarios], :]) scen_groups = if get(opts, :by_RCP, false) ADRIA.analysis.scenario_rcps(_scenarios) else @@ -145,7 +145,7 @@ function ADRIA.viz.scenarios!( scen_groups; opts=opts, axis_opts=axis_opts, - series_opts=series_opts, + series_opts=series_opts ) end function ADRIA.viz.scenarios!( @@ -165,7 +165,6 @@ function ADRIA.viz.scenarios!( by_RCP=by_RCP, by=sort_by, outcomes=outcomes, default_names=default_names ) - if get(opts, :summarize, true) scenarios_confint!(ax, outcomes, scen_groups, group_names) else @@ -211,9 +210,8 @@ function scenarios_confint!( confints::AbstractArray, ordered_groups::Vector{Symbol}, _colors::Dict{Symbol,T}; - x_vals::Union{Vector{Int64},Vector{Float64}}=collect(1:size(confints, 1)), + x_vals::Union{Vector{Int64},Vector{Float64}}=collect(1:size(confints, 1)) )::Nothing where {T<:Union{RGBA{Float32},String,Symbol}} - for idx in eachindex(ordered_groups) band_color = (_colors[ordered_groups[idx]], 0.4) y_lower, y_upper = confints[:, idx, 1], confints[:, idx, 3] @@ -248,7 +246,7 @@ function scenarios_series!( scen_groups::Dict{Symbol,BitVector}, group_names::Vector{Symbol}; series_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - x_vals::T=collect(1:size(outcomes, 1)), + x_vals::T=collect(1:size(outcomes, 1)) )::Nothing where {T<:Union{Vector{Int64},Vector{Float64}}} _colors::Dict{Symbol,Union{Symbol,RGBA{Float32}}} = colors(scen_groups) _alphas::Dict{Symbol,Float64} = alphas(scen_groups) @@ -283,7 +281,7 @@ function scenarios_hist( ylims!( ax_hist, minimum(scen_dist) - quantile(scen_dist, 0.05), - maximum(scen_dist) + quantile(scen_dist, 0.05), + maximum(scen_dist) + quantile(scen_dist, 0.05) ) return nothing @@ -329,14 +327,14 @@ function _sort_keys( !isempty(default_names) && return default_names default_keys = [:counterfactual, :unguided, :guided] - return default_keys[default_keys.∈[scen_types]] + return default_keys[default_keys .∈ [scen_types]] elseif by == :variance msg = "When sorting by variance, optional parameter `outcomes` must be provided" isempty(outcomes) && throw(ArgumentError(msg)) return sort( scen_types; by=type -> sum(var(outcomes[:, scenario_types[type]]; dims=2)), - rev=true, + rev=true ) elseif by == :size msg = "When sorting by size, optional parameter `outcomes` must be provided" @@ -345,6 +343,10 @@ function _sort_keys( scen_types; by=type -> size(outcomes[:, 
scenario_types[type]], 2), rev=true ) else - throw(ArgumentError("Invalid 'by' option. Must be one of: [:default, :variance, :size]")) + throw( + ArgumentError( + "Invalid 'by' option. Must be one of: [:default, :variance, :size]" + ) + ) end end diff --git a/ext/AvizExt/viz/sensitivity.jl b/ext/AvizExt/viz/sensitivity.jl index 9d6a61ccb..46a8180ae 100644 --- a/ext/AvizExt/viz/sensitivity.jl +++ b/ext/AvizExt/viz/sensitivity.jl @@ -34,7 +34,7 @@ function ADRIA.viz.pawn!( g::Union{GridLayout,GridPosition}, Si::YAXArray; opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE() ) xtick_rot = get(axis_opts, :xticklabelrotation, 2.0 / π) @@ -50,14 +50,14 @@ function ADRIA.viz.pawn!( # Sort by sort_by = get(opts, :by, :median) - Si = Si[sortperm(Si[Si=At(sort_by)], rev=true), :] + Si = Si[sortperm(Si[Si=At(sort_by)]; rev=true), :] y, x = Si.axes ax = Axis( - g[1, 1], + g[1, 1]; xticks=(1:length(x), string.(x)), yticks=(1:length(y), string.(y)), - xticklabelrotation=xtick_rot; + xticklabelrotation=xtick_rot, axis_opts... ) ax.yreversed = true @@ -113,11 +113,11 @@ function ADRIA.viz.tsa!( factors, Si, timesteps = si.axes x_tickpos, x_ticklabel = _time_labels(timesteps) ax = Axis( - g[1, 1], + g[1, 1]; xticks=(x_tickpos, x_ticklabel), xticklabelrotation=xtick_rot, xlabel=xlabel, - ylabel=ylabel; + ylabel=ylabel, axis_opts... ) @@ -131,11 +131,11 @@ function ADRIA.viz.tsa!( # min_step = (1 / 0.05) # color_weight = min((1.0 / (length(factors) / min_step)), 0.6) comps = unique(all_comps) - dc = distinguishable_colors(length(comps), [RGB(1, 1, 1), RGB(0, 0, 0)], dropseed=true) + dc = distinguishable_colors(length(comps), [RGB(1, 1, 1), RGB(0, 0, 0)]; dropseed=true) lns = Plot[ series!( ax, - si[Si=At(stat)][findall(all_comps .== _cmp), :], + si[Si=At(stat)][findall(all_comps .== _cmp), :]; labels=repeat([_cmp], count(all_comps .== _cmp)), solid_color=(dc[i], 0.2) ) @@ -184,7 +184,7 @@ function ADRIA.viz.rsa!( si::Dataset, factors::Vector{Symbol}; opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE() ) n_factors::Int64 = length(factors) if n_factors > 30 @@ -247,7 +247,7 @@ function ADRIA.viz.rsa!( ax::Axis = Axis( g[r, c]; title=h_names[f_names .== f_name][1], - axis_opts..., + axis_opts... ) scatterlines!(ax, fv_s, collect(si[f_name]); markersize=15) @@ -265,11 +265,11 @@ function ADRIA.viz.rsa!( end linkyaxes!(axs...) 
- Label(g[end+1, :], text=xlabel, fontsize=32) + Label(g[end + 1, :]; text=xlabel, fontsize=32) Label(g[1:(end - 1), 0]; text=ylabel, fontsize=32, rotation=π / 2.0) if :title in keys(axis_opts) - Label(g[0, :], text=title_val, fontsize=40) + Label(g[0, :]; text=title_val, fontsize=40) end # Clear empty figures @@ -317,7 +317,7 @@ function ADRIA.viz.outcome_map!( outcomes::YAXArray, factors::Vector{Symbol}; opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE() ) # TODO: Clean up and compartmentalize as a lot of code here are duplicates of those # found in `rsa()` @@ -384,9 +384,11 @@ function ADRIA.viz.outcome_map!( ax, fv_s[.!ismissing.(outcomes[factors=At(f_name), CI=At(:lower)])], collect(skipmissing(outcomes[factors=At(f_name), CI=At(:lower)])), - collect(skipmissing(outcomes[factors=At(f_name), CI=At(:upper)])), + collect(skipmissing(outcomes[factors=At(f_name), CI=At(:upper)])) + ) + scatterlines!( + ax, fv_s, outcomes[factors=At(f_name), CI=At(:mean)]; markersize=15 ) - scatterlines!(ax, fv_s, outcomes[factors=At(f_name), CI=At(:mean)]; markersize=15) if f_name == :guided ax.xticks = (fv_s, fv_labels) @@ -439,7 +441,7 @@ function ADRIA.viz.outcome_map( factors::Vector{Symbol}; opts::OPT_TYPE=DEFAULT_OPT_TYPE(), fig_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), - axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE() ) f = Figure(; fig_opts...) g = f[1, 1] = GridLayout() @@ -471,7 +473,7 @@ function _series_convergence( Si_conv::YAXArray, factors::Vector{Symbol}; opts::OPT_TYPE=DEFAULT_OPT_TYPE(:plot_overlay => true), - axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), + axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE() ) plot_overlay = get(opts, :plot_overlay, true) n_scenarios = collect(lookup(Si_conv, :n_scenarios)) @@ -492,7 +494,7 @@ function _series_convergence( permutedims(Si_conv[Si=At([:lb, :median, :ub])], (3, 1, 2)).data, collect(keys(grps)), _colors; - x_vals=n_scenarios, + x_vals=n_scenarios ) ax.xlabel = xlabel ax.ylabel = ylabel @@ -521,7 +523,7 @@ function _series_convergence( ax, n_scenarios, Si_conv[Si=At(:median)][factors=At(factors[step])].data; - color=(_colors[factors[step]], _alphas[factors[step]]), + color=(_colors[factors[step]], _alphas[factors[step]]) ) band!( @@ -529,7 +531,7 @@ function _series_convergence( n_scenarios, Si_conv[Si=At(:lb), factors=At(factors[step])].data, Si_conv[Si=At(:ub), factors=At(factors[step])].data; - color=(_colors[factors[step]], _alphas[factors[step]]), + color=(_colors[factors[step]], _alphas[factors[step]]) ) step += 1 push!(axs, ax) @@ -566,7 +568,6 @@ function _series_convergence( rethrow(err) end end - end return g end @@ -614,7 +615,7 @@ function _heatmap_convergence( ylabel=y_label, xlabelsize=x_labelsize, ylabelsize=y_labelsize, - axis_opts..., + axis_opts... 
    )
    heatmap!(ax, z')
    colorbar_label = get(opts, :colorbar_label, "Relative Sensitivity")
@@ -661,7 +662,7 @@ function ADRIA.viz.convergence(
         Si_conv,
         factors;
         opts=opts,
-        axis_opts=axis_opts,
+        axis_opts=axis_opts
     )
     return f
 end
@@ -680,5 +681,4 @@ function ADRIA.viz.convergence!(
     else
         error("Convergence plot $(viz_type) is not expected.")
     end
-
 end
diff --git a/ext/AvizExt/viz/spatial.jl b/ext/AvizExt/viz/spatial.jl
index 2d6556ee2..adb26234a 100644
--- a/ext/AvizExt/viz/spatial.jl
+++ b/ext/AvizExt/viz/spatial.jl
@@ -3,7 +3,6 @@
 using Graphs, GraphMakie, SimpleWeightedGraphs
 
 using ADRIA: _get_geom_col
-
 """
     _get_geoms(gdf::DataFrame)
@@ -41,7 +40,6 @@ function set_axis_defaults(axis_opts::OPT_TYPE)::OPT_TYPE
     return axis_opts
 end
-
 """
     create_map!(
         f::Union{GridLayout,GridPosition},
@@ -375,7 +373,9 @@ function ADRIA.viz.connectivity(
     fig_opts::OPT_TYPE=set_figure_defaults(DEFAULT_OPT_TYPE()),
     axis_opts::OPT_TYPE=set_axis_defaults(DEFAULT_OPT_TYPE())
 )
-    return ADRIA.viz.connectivity(dom, dom.conn; in_method, out_method, opts, fig_opts, axis_opts)
+    return ADRIA.viz.connectivity(
+        dom, dom.conn; in_method, out_method, opts, fig_opts, axis_opts
+    )
 end
 function ADRIA.viz.connectivity(
     dom::Domain,
@@ -390,7 +390,9 @@
         @warn "Both in and out centrality measures provided. Plotting out centralities."
         _, conn_weight, network = ADRIA.connectivity_strength(conn; in_method, out_method)
     elseif !isnothing(in_method) && isnothing(out_method)
-        conn_weight, _, network = ADRIA.connectivity_strength(conn; in_method, out_method=outdegree_centrality)
+        conn_weight, _, network = ADRIA.connectivity_strength(
+            conn; in_method, out_method=outdegree_centrality
+        )
     elseif isnothing(in_method) && isnothing(out_method)
         error("Measure for in or out centralities needs to be provided.")
     else
@@ -398,7 +400,9 @@
         in_method = indegree_centrality
     end
 
-    _, conn_weight, network = ADRIA.connectivity_strength(conn; in_method, out_method=outdegree_centrality)
+    _, conn_weight, network = ADRIA.connectivity_strength(
+        conn; in_method, out_method=outdegree_centrality
+    )
 
     return ADRIA.viz.connectivity(dom, network, conn_weight; opts, fig_opts, axis_opts)
diff --git a/ext/AvizExt/viz/taxa_dynamics.jl b/ext/AvizExt/viz/taxa_dynamics.jl
index b98acb509..98b00cc55 100644
--- a/ext/AvizExt/viz/taxa_dynamics.jl
+++ b/ext/AvizExt/viz/taxa_dynamics.jl
@@ -42,7 +42,11 @@ function ADRIA.viz.taxonomy(
     series_opts::OPT_TYPE=DEFAULT_OPT_TYPE()
 )::Figure
     if !haskey(rs.outcomes, :relative_taxa_cover)
-        throw(ArgumentError("Unable to found relative_taxa_cover in outcomes. This variable may be passed manually."))
+        throw(
+            ArgumentError(
+                "Unable to find relative_taxa_cover in outcomes. This variable may be passed manually."
+            )
+        )
     end
     return ADRIA.viz.taxonomy(
         rs.inputs,
@@ -76,7 +80,7 @@
     ADRIA.viz.taxonomy!(
         g,
         relative_taxa_cover,
-        scen_groups,
+        scen_groups;
         opts=opts,
         axis_opts=axis_opts,
         series_opts=series_opts
@@ -87,7 +91,7 @@ end
 function ADRIA.viz.taxonomy!(
     g::Union{GridLayout,GridPosition},
     relative_taxa_cover::YAXArray,
-    scen_groups::Dict{Symbol, BitVector};
+    scen_groups::Dict{Symbol,BitVector};
     opts::OPT_TYPE=DEFAULT_OPT_TYPE(),
     axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(),
     series_opts::OPT_TYPE=DEFAULT_OPT_TYPE()
@@ -118,9 +122,10 @@
     # Use default ADRIA colors for scenario type if colors are not specified
     n_scenario_groups::Int64 = length(keys(scen_groups))
     color = get(opts, :colors, nothing)
-    _colors = isnothing(color) ? [
-        COLORS[scen_name] for scen_name in keys(scen_groups)
-    ] : categorical_colors(color, n_scenario_groups)
+    _colors =
+        isnothing(color) ? [
+            COLORS[scen_name] for scen_name in keys(scen_groups)
+        ] : categorical_colors(color, n_scenario_groups)
 
     # Plot results
     intervention_by_taxonomy!(
@@ -145,7 +150,7 @@
 Create a plot for each scenario group, displaying the relative coral cover split by
 functional group.
 """
 function taxonomy_by_intervention!(
-    g::Union{GridLayout, GridPosition},
+    g::Union{GridLayout,GridPosition},
     relative_taxa_cover::YAXArray,
     scen_groups::Dict{Symbol},
     colors::Union{Vector{Symbol},Vector{RGBA{T}}};
@@ -154,14 +159,20 @@
     series_opts::OPT_TYPE=DEFAULT_OPT_TYPE()
 )::Nothing where {T<:Float32}
     # Get taxonomy names for legend
-    taxa_names = human_readable_name(functional_group_names(), title_case=true)
+    taxa_names = human_readable_name(functional_group_names(); title_case=true)
     series_opts[:labels] = get(series_opts, :labels, taxa_names)
 
     # Get default axis options
     xtick_vals = get(axis_opts, :xticks, _time_labels(timesteps(relative_taxa_cover)))
     xtick_rot = get(axis_opts, :xticklabelrotation, 2 / π)
 
     for (idx, scen_name) in enumerate(keys(scen_groups))
-        ax = Axis(g[idx, 1]; title=String(scen_name), xticks=xtick_vals, xticklabelrotation=xtick_rot, axis_opts...)
+        ax = Axis(
+            g[idx, 1];
+            title=String(scen_name),
+            xticks=xtick_vals,
+            xticklabelrotation=xtick_rot,
+            axis_opts...
+        )
 
         taxonomy_by_intervention!(
             ax,
@@ -170,7 +181,6 @@
             show_confints=show_confints,
             series_opts=series_opts
         )
-
     end
     return nothing
 end
@@ -189,7 +199,8 @@
     confints = zeros(n_timesteps, n_functional_groups, 3)
     for (idx, taxa) in enumerate(relative_taxa_cover.taxa)
         confints[:, idx, :] = series_confint(relative_taxa_cover[taxa=At(taxa)])
-        show_confints ? band!(
+        show_confints ?
+        band!(
             ax, 1:n_timesteps, confints[:, idx, 1], confints[:, idx, 3];
             color=(colors[idx], 0.4)
         ) : nothing
@@ -210,7 +221,7 @@
 Plot relative cover, comparing taxa cover between different scenario groups. Each panel
 shows a different coral taxonomy or functional group.
""" function intervention_by_taxonomy!( - g::Union{GridLayout, GridPosition}, + g::Union{GridLayout,GridPosition}, relative_taxa_cover::YAXArray, scen_groups::Dict{Symbol,BitVector}, colors::Union{Vector{Symbol},Vector{RGBA{T}}}; @@ -218,7 +229,7 @@ function intervention_by_taxonomy!( axis_opts::OPT_TYPE=DEFAULT_OPT_TYPE(), series_opts::OPT_TYPE=DEFAULT_OPT_TYPE() )::Nothing where {T<:Float32} - taxa_names = human_readable_name(functional_group_names(), title_case=true) + taxa_names = human_readable_name(functional_group_names(); title_case=true) scenario_group_names::Vector{Symbol} = collect(keys(scen_groups)) series_opts[:labels] = get(series_opts, :labels, String.(scenario_group_names)) @@ -226,7 +237,13 @@ function intervention_by_taxonomy!( for (idx, taxa_name) in enumerate(taxa_names) xtick_vals = get(axis_opts, :xticks, _time_labels(timesteps(relative_taxa_cover))) xtick_rot = get(axis_opts, :xticklabelrotation, 2 / π) - ax = Axis(g[idx, 1]; title=taxa_name, xticks=xtick_vals, xticklabelrotation=xtick_rot, axis_opts...) + ax = Axis( + g[idx, 1]; + title=taxa_name, + xticks=xtick_vals, + xticklabelrotation=xtick_rot, + axis_opts... + ) intervention_by_taxonomy!( ax, @@ -236,7 +253,6 @@ function intervention_by_taxonomy!( show_confints=show_confints, series_opts=series_opts ) - end return nothing end @@ -259,7 +275,8 @@ function intervention_by_taxonomy!( confints[:, idx, :] = series_confint( relative_taxa_cover[scenarios=scen_groups[scen]] ) - show_confints ? band!( + show_confints ? + band!( ax, 1:n_timesteps, confints[:, idx, 1], confints[:, idx, 3]; color=(colors[idx], 0.4) ) : nothing diff --git a/ext/AvizExt/viz/viz.jl b/ext/AvizExt/viz/viz.jl index 7c0c0f54b..140539677 100644 --- a/ext/AvizExt/viz/viz.jl +++ b/ext/AvizExt/viz/viz.jl @@ -10,7 +10,6 @@ using using ADRIA: axes_names, ResultSet, metrics.metric_label, analysis.col_normalize, model_spec using .AvizExt - const OPT_TYPE = Dict{Symbol,<:Any} const DEFAULT_OPT_TYPE = Dict{Symbol,Any} diff --git a/src/ADRIA.jl b/src/ADRIA.jl index 596018797..4833d450a 100644 --- a/src/ADRIA.jl +++ b/src/ADRIA.jl @@ -109,7 +109,54 @@ if ccall(:jl_generating_output, Cint, ()) == 1 Base.precompile(Tuple{typeof(load_domain),String,String}) Base.precompile(Tuple{typeof(setup_result_store!),Domain,DataFrame}) # time: 4.6720815 Base.precompile(Tuple{typeof(combine_results),Vector{String}}) # time: 4.0178256 - Base.precompile(Tuple{typeof(growthODE),Matrix{Float64},Matrix{Float64},NamedTuple{(:r, :k, :mb, :comp, :sm_comp, :small_massives, :small, :mid, :large, :acr_5_11, :acr_6_12, :rec, :sigma, :M_sm, :sXr, :X_mb, :cover),Tuple{Matrix{Float64},Vector{Float64},Matrix{Float64},Float64,Matrix{Float64},SVector{3,Int64},SVector{6,Int64},SVector{19,Int64},SVector{4,Int64},SVector{2,Int64},SVector{2,Int64},Matrix{Float64},Matrix{Float64},Matrix{Float64},Matrix{Float64},Matrix{Float64},Vector{Float64}}},Float64}) # time: 1.4354926 + Base.precompile( + Tuple{ + typeof(growthODE), + Matrix{Float64}, + Matrix{Float64}, + NamedTuple{ + ( + :r, + :k, + :mb, + :comp, + :sm_comp, + :small_massives, + :small, + :mid, + :large, + :acr_5_11, + :acr_6_12, + :rec, + :sigma, + :M_sm, + :sXr, + :X_mb, + :cover + ), + Tuple{ + Matrix{Float64}, + Vector{Float64}, + Matrix{Float64}, + Float64, + Matrix{Float64}, + SVector{3,Int64}, + SVector{6,Int64}, + SVector{19,Int64}, + SVector{4,Int64}, + SVector{2,Int64}, + SVector{2,Int64}, + Matrix{Float64}, + Matrix{Float64}, + Matrix{Float64}, + Matrix{Float64}, + Matrix{Float64}, + Vector{Float64} + } + }, + Float64 + } + ) # 
time: 1.4354926 Base.precompile( Tuple{ typeof(decision.rank_sites!), @@ -118,8 +165,8 @@ if ccall(:jl_generating_output, Cint, ()) == 1 Matrix{Int64}, Int64, typeof(decision.adria_topsis), - Int64, - }, + Int64 + } ) # time: 0.3518593 Base.precompile( Tuple{ @@ -129,12 +176,39 @@ if ccall(:jl_generating_output, Cint, ()) == 1 Matrix{Int64}, Int64, typeof(decision.adria_vikor), - Int64, - }, + Int64 + } ) # time: 0.3170264 - Base.precompile(Tuple{typeof(scenario_attributes),String,String,Vector{String},String,EnvLayer{String,Vector{Int64}},SimConstants,Vector{String},Vector{Float64},Vector{Float64},Vector{Tuple{Float64,Float64}}}) # time: 0.2140636 + Base.precompile( + Tuple{ + typeof(scenario_attributes), + String, + String, + Vector{String}, + String, + EnvLayer{String,Vector{Int64}}, + SimConstants, + Vector{String}, + Vector{Float64}, + Vector{Float64}, + Vector{Tuple{Float64,Float64}} + } + ) # time: 0.2140636 Base.precompile(Tuple{typeof(model_spec),Model}) # time: 0.1997914 - Base.precompile(Tuple{typeof(bleaching_mortality!),Matrix{Float64},Matrix{Float64},Vector{Float64},Int64,Vector{Float64},Vector{Float64},Vector{Float64},Vector{Float64},Float64}) # time: 0.1940948 + Base.precompile( + Tuple{ + typeof(bleaching_mortality!), + Matrix{Float64}, + Matrix{Float64}, + Vector{Float64}, + Int64, + Vector{Float64}, + Vector{Float64}, + Vector{Float64}, + Vector{Float64}, + Float64 + } + ) # time: 0.1940948 Base.precompile( Tuple{ typeof(decision.decision_matrix), @@ -149,22 +223,52 @@ if ccall(:jl_generating_output, Cint, ()) == 1 Vector{Float64}, Matrix{Float64}, Matrix{Float64}, - Float64, - }, + Float64 + } ) # time: 0.1929096 - Base.precompile(Tuple{typeof(scenario_attributes),String,String,Vector{String},String,EnvLayer{String,Vector{Any}},Dict{String,Any},Vector{Any},Vector{Float64},Vector{Float64},Vector{Any}}) # time: 0.1755622 + Base.precompile( + Tuple{ + typeof(scenario_attributes), + String, + String, + Vector{String}, + String, + EnvLayer{String,Vector{Any}}, + Dict{String,Any}, + Vector{Any}, + Vector{Float64}, + Vector{Float64}, + Vector{Any} + } + ) # time: 0.1755622 Base.precompile(Tuple{typeof(proportional_adjustment!),Matrix{Float64},Vector{Float64}}) # time: 0.1680073 Base.precompile(Tuple{typeof(_remove_workers)}) # time: 0.1593244 Base.precompile(Tuple{typeof(_setup_workers)}) # time: 0.1571776 Base.precompile(Tuple{typeof(switch_RCPs!),Domain,String}) # time: 0.1284853 # Base.precompile(Tuple{typeof(component_params),DataFrame,Type{CriteriaWeights}}) # time: 0.1223987 - Base.precompile(Tuple{Type{Domain},String,String,String,Vector{Int64},String,String,String,String,String,String,String}) # time: 0.1113899 + Base.precompile( + Tuple{ + Type{Domain}, + String, + String, + String, + Vector{Int64}, + String, + String, + String, + String, + String, + String, + String + } + ) # time: 0.1113899 Base.precompile(Tuple{typeof(setup_cache),Domain}) # time: 0.1060752 - Base.precompile(EnvLayer, (String, String, String, String, String, String, String, String, Any)) + Base.precompile( + EnvLayer, (String, String, String, String, String, String, String, String, Any) + ) Base.precompile(load_results, (String,)) end - # @setup_workload begin # # Putting some things in `setup` can reduce the size of the # # precompile file and potentially make loading faster. 
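For reference, the `Base.precompile(Tuple{typeof(f), ArgTypes...})` calls reformatted above all follow one pattern: they force compilation of a single concrete method signature while output is being generated, so first calls at runtime skip that latency. A minimal sketch of the pattern, reusing the same `jl_generating_output` guard (the module, type, and function here are illustrative names, not part of ADRIA):

```julia
module PrecompileSketch

struct Settings
    rate::Float64
end

grow(s::Settings, cover::Vector{Float64}) = cover .* (1.0 + s.rate)

# Emit precompile statements only while compiled output (e.g. a sysimage)
# is being generated, mirroring the guard used in src/ADRIA.jl.
if ccall(:jl_generating_output, Cint, ()) == 1
    Base.precompile(Tuple{typeof(grow),Settings,Vector{Float64}})
end

end # module
```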
diff --git a/src/Domain.jl b/src/Domain.jl index b92e7b995..261adfb0d 100644 --- a/src/Domain.jl +++ b/src/Domain.jl @@ -68,7 +68,7 @@ function model_spec(d::Domain, filepath::String)::Nothing end ms = model_spec(d) - ms[!, :] .= replace(Matrix(ms), nothing=>"") + ms[!, :] .= replace(Matrix(ms), nothing => "") CSV.write(filepath, ms; header=true, append=true) return nothing @@ -78,7 +78,8 @@ function model_spec(m::Model)::DataFrame dist_params = spec[!, :dist_params] DataFrames.hcat!( - spec, DataFrame( + spec, + DataFrame( :lower_bound => first.(dist_params), :upper_bound => getindex.(dist_params, 2) ) @@ -132,13 +133,13 @@ function component_params(m::Model, component)::DataFrame return component_params(model_spec(m), component) end function component_params(spec::DataFrame, component)::DataFrame - return spec[spec.component.==replace.(string(component), "ADRIA." => ""), :] + return spec[spec.component .== replace.(string(component), "ADRIA." => ""), :] end function component_params(m::Model, components::Vector{T})::DataFrame where {T} return component_params(model_spec(m), components) end function component_params(spec::DataFrame, components::Vector{T})::DataFrame where {T} - return spec[spec.component.∈[replace.(string.(components), "ADRIA." => "")], :] + return spec[spec.component .∈ [replace.(string.(components), "ADRIA." => "")], :] end """ @@ -206,7 +207,7 @@ over species. Leftover space ∈ [0, 1] """ function relative_leftover_space( - loc_coral_cover::AbstractArray, + loc_coral_cover::AbstractArray )::AbstractArray return max.(1.0 .- loc_coral_cover, 0.0) end @@ -243,4 +244,4 @@ function update!(dom::Domain, spec::DataFrame)::Nothing end # Dummy interface to allow precompilation -function switch_RCPs!() end \ No newline at end of file +function switch_RCPs!() end diff --git a/src/ExtInterface/ADRIA/Domain.jl b/src/ExtInterface/ADRIA/Domain.jl index 13dae88de..9533bec0c 100644 --- a/src/ExtInterface/ADRIA/Domain.jl +++ b/src/ExtInterface/ADRIA/Domain.jl @@ -51,7 +51,7 @@ function Domain( removed_sites::Vector{String}, DHW::YAXArray, wave::YAXArray, - cyclone_mortality::YAXArray, + cyclone_mortality::YAXArray )::ADRIADomain where {T<:Union{Float32,Float64}} sim_constants::SimConstants = SimConstants() criteria_weights::Vector{Union{DecisionWeights,DecisionThresholds}} = [ @@ -64,7 +64,7 @@ function Domain( EnvironmentalLayer(DHW, wave, cyclone_mortality), Intervention(), criteria_weights..., - Coral(), + Coral() )) return ADRIADomain( name, @@ -83,7 +83,7 @@ function Domain( wave, cyclone_mortality, model, - sim_constants, + sim_constants ) end @@ -118,7 +118,7 @@ function Domain( conn_path::String, dhw_fn::String, wave_fn::String, - cyclone_mortality_fn::String, + cyclone_mortality_fn::String )::ADRIADomain local site_data::DataFrame try @@ -148,7 +148,7 @@ function Domain( conn_path, dhw_fn, wave_fn, - timeframe, + timeframe ) # Sort data to maintain consistent order @@ -183,7 +183,8 @@ function Domain( waves_params = ispath(wave_fn) ? (wave_fn, "Ub") : (timeframe, conn_ids) waves = load_env_data(waves_params...) - cyc_params = ispath(cyclone_mortality_fn) ? (cyclone_mortality_fn,) : (timeframe, site_data) + cyc_params = + ispath(cyclone_mortality_fn) ? (cyclone_mortality_fn,) : (timeframe, site_data) cyclone_mortality = load_cyclone_mortality(cyc_params...) 
    # Add compatibility with non-migrated datasets but always default to the current coral spec
@@ -215,7 +216,7 @@
         connectivity.truncated,
         dhw,
         waves,
-        cyclone_mortality,
+        cyclone_mortality
     )
 end
@@ -279,7 +280,7 @@ function load_domain(::Type{ADRIADomain}, path::String, rcp::String)::ADRIADomai
         conn_path,
         dhw_fn,
         wave_fn,
-        cyclone_mortality_fn,
+        cyclone_mortality_fn
     )
 end
 function load_domain(path::String, rcp::String)::ADRIADomain
diff --git a/src/ExtInterface/ReefMod/RMEDomain.jl b/src/ExtInterface/ReefMod/RMEDomain.jl
index d9ba9187a..48b838761 100644
--- a/src/ExtInterface/ReefMod/RMEDomain.jl
+++ b/src/ExtInterface/ReefMod/RMEDomain.jl
@@ -49,13 +49,13 @@ Standardize cluster id column name
 """
 function _standardize_cluster_ids!(spatial_data::DataFrame)::Nothing
     try
-        rename!(spatial_data, Dict("LOC_NAME_S"=>"cluster_id"))
+        rename!(spatial_data, Dict("LOC_NAME_S" => "cluster_id"))
     catch err
         if !(err isa ArgumentError)
             rethrow(err)
         end
-        rename!(spatial_data, Dict("reef_name"=>"cluster_id"))
+        rename!(spatial_data, Dict("reef_name" => "cluster_id"))
     end
 
     return nothing
@@ -71,7 +71,9 @@ function _manual_id_corrections!(spatial_data::DataFrame, id_list::DataFrame)::N
     # Re-order spatial data to match RME dataset
     # MANUAL CORRECTION
     spatial_data[spatial_data.LABEL_ID .== "20198", :LABEL_ID] .= "20-198"
-    id_order = [first(findall(x .== spatial_data.LABEL_ID)) for x in string.(id_list[:, 1])]
+    id_order = [
+        first(findall(x .== spatial_data.LABEL_ID)) for x in string.(id_list[:, 1])
+    ]
     spatial_data[!, :] = spatial_data[id_order, :]
 
     # Check that the two lists of location ids are identical
@@ -154,7 +156,9 @@ function load_domain(::Type{RMEDomain}, fn_path::String, RCP::String)::RMEDomain
     spatial_data[:, :k] .= 1.0 .- id_list[:, 3]
 
     # Need to load initial coral cover after we know `k` area.
- init_coral_cover::YAXArray{Float64} = load_initial_cover(RMEDomain, data_files, loc_ids, spatial_data) + init_coral_cover::YAXArray{Float64} = load_initial_cover( + RMEDomain, data_files, loc_ids, spatial_data + ) conn_data::YAXArray{Float64} = load_connectivity(RMEDomain, data_files, loc_ids) @@ -185,7 +189,8 @@ function load_domain(::Type{RMEDomain}, fn_path::String, RCP::String)::RMEDomain functional_groups = functional_group_names() cyc_scens::YAXArray{Float64} = ZeroDataCube(; - T=Float64, timesteps=timeframe_range, locs=loc_ids, species=functional_groups, scenarios=[1] + T=Float64, timesteps=timeframe_range, locs=loc_ids, species=functional_groups, + scenarios=[1] ) env_md = EnvLayer( @@ -197,7 +202,7 @@ function load_domain(::Type{RMEDomain}, fn_path::String, RCP::String)::RMEDomain "", "", "", - timeframe_range, + timeframe_range ) criteria_weights::Vector{Union{DecisionWeights,DecisionThresholds}} = [ @@ -229,7 +234,7 @@ function load_domain(::Type{RMEDomain}, fn_path::String, RCP::String)::RMEDomain wave_scens, cyc_scens, model, - SimConstants(), + SimConstants() ) end @@ -278,7 +283,7 @@ function load_DHW( tf_start = findall(timeframe[1] .∈ data_tf)[1] tf_end = findall(timeframe[2] .∈ data_tf)[1] - d1 = first_file[:, (tf_start+1):(tf_end+1)] + d1 = first_file[:, (tf_start + 1):(tf_end + 1)] data_shape = reverse(size(d1)) data_cube = zeros(data_shape..., length(rcp_files)) data_cube[:, :, 1] .= Matrix(d1)' @@ -308,7 +313,7 @@ function load_DHW( continue end - data_cube[:, :, i+1] .= Matrix(d[:, tf_start:tf_end])' + data_cube[:, :, i + 1] .= Matrix(d[:, tf_start:tf_end])' end # Only return valid scenarios @@ -316,7 +321,7 @@ function load_DHW( data_cube[:, :, keep_ds]; timesteps=timeframe[1]:timeframe[2], locs=loc_ids, - scenarios=rcp_files[keep_ds], + scenarios=rcp_files[keep_ds] ) end @@ -366,7 +371,7 @@ function load_connectivity( return DataCube( conn_data; Source=loc_ids, - Sink=loc_ids, + Sink=loc_ids ) end @@ -388,7 +393,7 @@ function load_cyclones( ::Type{RMEDomain}, data_path::String, loc_ids::Vector{String}, - tf::Tuple{Int64,Int64}, + tf::Tuple{Int64,Int64} )::YAXArray # NOTE: This reads from the provided CSV files # Replace with approach that reads directly from binary files @@ -409,12 +414,12 @@ function load_cyclones( end # Cut down to the given time frame assuming the first entry represents the first index - cyc_data = permutedims(cyc_data, (2, 1, 3))[1:((tf[2]-tf[1])+1), :, :] + cyc_data = permutedims(cyc_data, (2, 1, 3))[1:((tf[2] - tf[1]) + 1), :, :] return DataCube( cyc_data; timesteps=tf[1]:tf[2], locs=loc_ids, - scenarios=1:length(cyc_files), + scenarios=1:length(cyc_files) ) end @@ -459,8 +464,8 @@ function load_initial_cover( # Find integral density between bounds of each size class areas to create weights for each size class. cdf_integral = cdf.(reef_mod_area_dist, bin_edges_area) - size_class_weights = (cdf_integral[:, 2:end] .- cdf_integral[:, 1:(end-1)]) - size_class_weights = size_class_weights ./ sum(size_class_weights, dims=2) + size_class_weights = (cdf_integral[:, 2:end] .- cdf_integral[:, 1:(end - 1)]) + size_class_weights = size_class_weights ./ sum(size_class_weights; dims=2) # Take the mean over repeats, as suggested by YM (pers comm. 2023-02-27 12:40pm AEDT). # Convert from percent to relative values. 
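The `size_class_weights` computation above implements a simple discretisation: with $F$ the CDF of the assumed colony-area distribution and $b_0 < b_1 < \dots < b_S$ the size-class bin edges (in area units), each size class receives the probability mass between its edges, renormalised so the weights sum to one:

$$w_s = \frac{F(b_s) - F(b_{s-1})}{\sum_{k=1}^{S} \left( F(b_k) - F(b_{k-1}) \right)}, \qquad s = 1, \dots, S$$

which is exactly the difference of adjacent `cdf_integral` columns divided by their row sum.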
@@ -484,8 +489,8 @@ function load_initial_cover( return DataCube( icc_data; - species=1:(length(icc_files)*n_sizes), - locs=loc_ids, + species=1:(length(icc_files) * n_sizes), + locs=loc_ids ) end diff --git a/src/ExtInterface/ReefMod/ReefModDomain.jl b/src/ExtInterface/ReefMod/ReefModDomain.jl index 6c1149f36..226605b9c 100644 --- a/src/ExtInterface/ReefMod/ReefModDomain.jl +++ b/src/ExtInterface/ReefMod/ReefModDomain.jl @@ -12,7 +12,6 @@ import ArchGDAL: createpoint import YAXArrays.DD: At - mutable struct ReefModDomain <: AbstractReefModDomain const name::String RCP::String @@ -85,7 +84,7 @@ function load_domain( id_list_fn, DataFrame; header=false, - comment="#", + comment="#" ) _manual_id_corrections!(spatial_data, id_list) @@ -140,7 +139,7 @@ function load_domain( "", "", "", - timeframe[1]:timeframe[2], + timeframe[1]:timeframe[2] ) criteria_weights::Vector{Union{DecisionWeights,DecisionThresholds}} = [ @@ -179,7 +178,7 @@ function load_domain( wave_scens, cyclone_mortality_scens, model, - SimConstants(), + SimConstants() ) end @@ -214,13 +213,14 @@ function load_initial_cover( # Find integral density between bounds of each size class areas to create weights for each size class. cdf_integral = cdf.(reef_mod_area_dist, bin_edges_area) - size_class_weights = (cdf_integral[2:end] .- cdf_integral[1:(end-1)]) + size_class_weights = (cdf_integral[2:end] .- cdf_integral[1:(end - 1)]) size_class_weights = size_class_weights ./ sum(size_class_weights) # Take the mean over repeats, as suggested by YM (pers comm. 2023-02-27 12:40pm AEDT). # Convert from percent to relative values. # YAXArray ordering is [time ⋅ location ⋅ scenario] - icc_data = ((dropdims(mean(init_cc_per_taxa; dims=:scenario); dims=:scenario)) ./ 100.0).data + icc_data = + ((dropdims(mean(init_cc_per_taxa; dims=:scenario); dims=:scenario)) ./ 100.0).data # Repeat species over each size class and reshape to give ADRIA compatible size (36 * n_locs). # Multiply by size class weights to give initial cover distribution over each size class. 
@@ -231,14 +231,14 @@ function load_initial_cover( n_species = length(init_cc_per_taxa[location=1, group=:, scenario=1]) - return DataCube(icc_data; species=1:(n_species*6), locs=loc_ids) + return DataCube(icc_data; species=1:(n_species * 6), locs=loc_ids) end """ _find_file(dir::String)::String """ function _find_file(dir::String, ident::Union{Regex,String})::String - pos_files = filter(isfile, readdir(dir, join=true)) + pos_files = filter(isfile, readdir(dir; join=true)) pos_files = filter(x -> occursin(ident, x), pos_files) if length(pos_files) == 0 ArgumentError("Unable to find file in $(dir)") @@ -249,7 +249,7 @@ function _find_file(dir::String, ident::Union{Regex,String})::String end function _find_netcdf(dir::String, scenario::String)::String - pos_files = filter(isfile, readdir(dir, join=true)) + pos_files = filter(isfile, readdir(dir; join=true)) pos_files = filter(x -> occursin(".nc", x), pos_files) pos_files = filter(x -> occursin(scenario, x), pos_files) if length(pos_files) == 0 @@ -270,14 +270,14 @@ function switch_RCPs!(d::ReefModDomain, RCP::String)::ReefModDomain new_scen_dataset = open_dataset(new_scen_fn) dhws = Cube( - new_scen_dataset[["record_applied_DHWs"]] - )[timestep=At(d.env_layer_md.timeframe)].data[:, :, :] + new_scen_dataset[["record_applied_DHWs"]] +)[timestep=At(d.env_layer_md.timeframe)].data[:, :, :] scens = 1:size(dhws)[3] loc_ids = d.site_ids d.dhw_scens = DataCube( - dhws, + dhws; timesteps=d.env_layer_md.timeframe, locs=loc_ids, scenarios=scens @@ -315,9 +315,10 @@ function _cyclone_mortality_scens( timeframe::Tuple{Int64,Int64} )::YAXArray{Float64} # Add 1 to every scenarios so they represent indexes in cyclone_mr vectors - cyclone_scens::YAXArray = Cube( - dom_dataset[["record_applied_cyclone"]] - )[timestep=At(timeframe[1]:timeframe[2])] .+ 1 + cyclone_scens::YAXArray = + Cube( + dom_dataset[["record_applied_cyclone"]] + )[timestep=At(timeframe[1]:timeframe[2])] .+ 1 species::Vector{Symbol} = functional_group_names() cyclone_mortality_scens::YAXArray{Float64} = ZeroDataCube(; diff --git a/src/analysis/analysis.jl b/src/analysis/analysis.jl index 3a689cf80..b24fed266 100644 --- a/src/analysis/analysis.jl +++ b/src/analysis/analysis.jl @@ -12,7 +12,9 @@ using YAXArrays Normalize a matrix on a per-column basis (∈ [0, 1]). """ -function col_normalize(data::AbstractMatrix{T})::AbstractMatrix{T} where {T<:Union{Missing,Real}} +function col_normalize( + data::AbstractMatrix{T} +)::AbstractMatrix{T} where {T<:Union{Missing,Real}} local d try d = copy(data) @@ -26,7 +28,9 @@ function col_normalize(data::AbstractMatrix{T})::AbstractMatrix{T} where {T<:Uni return d end -function col_normalize(data::AbstractVector{T})::AbstractVector{T} where {T<:Union{Missing,Real}} +function col_normalize( + data::AbstractVector{T} +)::AbstractVector{T} where {T<:Union{Missing,Real}} return normalize(data) end @@ -60,7 +64,7 @@ function normalize!(data::AbstractArray{T})::AbstractArray{T} where {T<:Union{Mi return data end - data .= (data .- mi) ./ (ma - mi) + return data .= (data .- mi) ./ (ma - mi) end """ @@ -74,13 +78,13 @@ S-1 := 0.9 - 0.8 etc """ function discretize_outcomes(y; S=20) - steps = 0.0:(1/S):1.0 + steps = 0.0:(1 / S):1.0 y_s_hat = col_normalize(y) y_disc = zeros(size(y)...) for i in axes(steps, 1)[2:end] Threads.@threads for j in size(y_s_hat, 2) - y_disc[steps[i-1]. 
ce = complexity([[1, 2, 3] [1, 3, 4]]) -julia> correction_factor(ce[1], ce[2]) -Float64: - 2.5 - ``` -""" -function correction_factor(ce_i::T, ce_j::T)::Float64 where {T<:Real} - return max(ce_i, ce_j) / min(ce_i, ce_j) -end - -""" - complexity_invariance_distance(data::AbstractMatrix{<:Real}; distance=:euclidean)::AbstractMatrix{Float64} - -Compute Complexity Invariance Distance (CID) between every matrix column pairs (`data`) of -shape \$T ⋅ S\$. The distance between every two series is the weighted euclidian distanced -multiplied by the correction factor, which takes into account the ration between the two -series complexities. Returns a matrix of distances (\$S ⋅ S\$). - -# Arguments -- `data` : Matrix of \$T ⋅ S\$, where \$T\$ is total number of time steps and \$S\$ is -number of scenarios -- `distance` : Switch between Euclidean (`:euclidean`) or weighted Euclidean (`:weuclidean`) -distance measurements. Defaults to `:euclidean` - -# Returns -Matrix of complexity invariance distances. -""" -function complexity_invariance_distance( - data::AbstractMatrix{<:Real}; - distance=:euclidean -)::AbstractMatrix{Float64} - # Compute complexity vector - complexity = _complexity(data) - - # Create empty Matrix - data_size = size(data, 2) - cid_matrix::AbstractMatrix{Float64} = zeros(data_size, data_size) - - local weights::Vector{Float64} - if distance == :weuclidean - # [1, 1/2, 1/3, ..., 1/n] - weights = sqrt.(1 ./ (1:size(data, 1))) - end - dist_fn(x, y) = (distance == :euclidean) ? euclidean(x, y) : weuclidean(x, y, weights) - - #? Do we want to normalize the amplitudes of all series? - # Iterate over data matrix to compute CID (Complexity Invariance Distance) - for i in axes(data, 2) - @floop for j in axes(data, 2) - ed = dist_fn(data[:, i], data[:, j]) - cf = correction_factor(complexity[i], complexity[j]) - cid_matrix[i, j] = cid_matrix[j, i] = ed * cf - end - end - - return cid_matrix -end - -""" - cluster_series(data::AbstractMatrix{<:Real}, n_clusters::Int64, method::Symbol=:kmedoids, distance::Symbol=:euclidean)::Vector{Int64} - -Hierarchically cluster \$S\$ scenarios with \$T\$ time steps each. - -# Arguments -- `data` : Matrix of \$T ⋅ S\$, where \$T\$ is total number of time steps and \$S\$ is - number of scenarios -- `n_clusters` : Number of clusters determined _a priori_ -- `method` : Clustering method. Defaults to `:kmedoids` -- `distance` : Switch between Euclidean (`:euclidean`) or weighted Euclidean (`:weuclidean`) -distance measurements. Defaults to `:euclidean` - -# Returns -- Cluster ids indicating each scenario cluster assignment. - -# References -1. Steinmann, P., Auping, W.L., Kwakkel, J.H., 2020. - Behavior-based scenario discovery using time series clustering. - Technological Forecasting and Social Change 156, 120052. - https://doi.org/10.1016/j.techfore.2020.120052 - -2. Batista, G.E.A.P.A., Keogh, E.J., Tataw, O.M., de Souza, V.M.A., 2014. - CID: an efficient complexity-invariant distance for time series. - Data Min Knowl Disc 28, 634-669. 
- https://doi.org/10.1007/s10618-013-0312-3 -""" -function cluster_series( - data::AbstractMatrix{<:Real}, - n_clusters::Int64; - method::Symbol=:kmedoids, - distance::Symbol=:euclidean -)::Vector{Int64} - # Calculate distantes matrix - distances = complexity_invariance_distance(data; distance=distance) - - if method == :kmedoids - return kmedoids(distances, n_clusters).assignments - end - - # Return hierarchical clustering with n_clusters - dendogram = hclust(distances; linkage=:average) - return cutree(dendogram; k=n_clusters) -end - -""" - cluster_scenarios(data::AbstractArray{<:Real}, n_clusters::Int64; method::Symbol=:kmedoids, distance::Symbol=:euclidean)::Array{Int64} - -Alias to cluster_series. - -# Arguments -- `data` : Matrix of \$T ⋅ S\$, where \$T\$ is total number of time steps and \$S\$ is - number of scenarios -- `n_clusters` : Number of clusters determined _a priori_ -- `method` : Clustering method. Defaults to `:kmedoids` -- `distance` : Switch between Euclidean (`:euclidean`) or weighted Euclidean (`:weuclidean`) -distance measurements. Defaults to `:euclidean` - -# Returns -- Cluster ids indicating each scenario cluster assignment. - -# Examples -One can cluster scenarios based on a single Metric, passing a Matrix of outcomes for each -timestep and scenario: - -```julia -# Matrix of outcomes -s_tac = ADRIA.metrics.scenario_total_cover(rs) - -# Cluster scenarios -n_cluster = 6 -clusters = ADRIA.analysis.cluster_series(s_tac, n_clusters) -``` - -And perform multiple clusterings, based on multiple Metrics, passing a 3-dimensional Array -(or YAXArray) of outcomes for each timestep, scenario and Metric. - -```julia -metrics::Vector{ADRIA.metrics.Metric} = [ - ADRIA.metrics.scenario_total_cover, - ADRIA.metrics.scenario_asv, - ADRIA.metrics.scenario_absolute_juveniles, -] - -# 3-dimensional array of outcomes -outcomes = ADRIA.metrics.scenario_outcomes(rs, metrics) - -# Cluster scenarios -num_clusters = 6 -outcomes_clusters = ADRIA.analysis.cluster_scenarios(outcomes, num_clusters) -``` -""" -function cluster_scenarios( - data::AbstractArray{<:Real}, - n_clusters::Int64; - method::Symbol=:kmedoids, - distance::Symbol=:euclidean -)::Array{Int64} - ndims(data) == 2 && return cluster_series(data, n_clusters) - - _, n_scenarios, n_metrics = size(data) - - clusters = zeros(Int64, n_scenarios, n_metrics) - for m in 1:n_metrics - clusters[:, m] = cluster_series(data[:, :, m], n_clusters; method=method, distance=distance) - end - - return clusters -end - -""" - target_clusters(clusters::Vector{T}, outcomes::AbstractMatrix{<:Real}; metric=temporal_variability, size_limit=0.01) where {T<:Int64} - -Cluster scenarios into target and non target based on median outcome temporal variability of -previous time series cluster. 
- -# Arguments -- `clusters` : Vector with outcome cluster indexes -- `outcomes` : AbstractMatrix of scenario outcomes -- `metric` : Metric used to aggregate outcomes for each cluster -- `size_limit` : This function will iteratively merge the best cluster with the second best - if the fraction of scenarios inside it is below `size_limit` - -# Returns -Vector containing 1's for target and 0's for non-target clusters -""" -function target_clusters( - clusters::Vector{T}, - outcomes::AbstractMatrix{<:Real}; - metric=temporal_variability, - size_limit=0.01, -)::Vector{T} where {T<:Int64} - - # Compute statistic for each cluster - clusters_statistics::Vector{Float64} = [] - for cluster in unique(clusters) - normalized_outcomed = outcomes[:, clusters.==cluster] ./ maximum(outcomes) - statistic = median(metric(normalized_outcomed)) - push!(clusters_statistics, statistic) - end - - target_index = argmax(clusters_statistics) - target_indexes = [target_index] - - # Merge target cluster if it is below 1% of size - sizes = [size(outcomes[:, clusters.==c], 2) for c in unique(clusters)] - target_size = sizes[target_index] / sum(sizes) - while target_size < size_limit - # Nullify target_index to find the next argmax - clusters_statistics[target_index] = 0 - - # Find next best cluster and add to target_indexes - target_index = argmax(clusters_statistics) - push!(target_indexes, target_index) - - # Update target_size with next best cluster size - target_size += sizes[target_index] / sum(sizes) - end - - # Return new clusters vector with only 1 and 0 for target and non-target clusters - return [c ∈ target_indexes ? 1 : 0 for c in clusters] -end - -""" - find_scenarios(outcomes::AbstractMatrix{<:Real}, clusters::Vector{Int64}, filter_func::Function, aggregation_func::Function=temporal_variability)::BitVector - find_scenarios(outcomes::AbstractArray{<:Real,3}, clusters::AbstractMatrix{Int64}, filter_funcs::Vector{Function}; aggregation_function::Function=temporal_variability)::BitVector - find_scenarios(outcomes::AbstractArray{<:Real,3}, clusters::AbstractMatrix{Int64}, filter_func::Function; aggregation_function::Function=temporal_variability)::BitVector - -If outcomes is Matrix of scenario outcomes and clusters is a Vector of clusters: -- Computes a median series for each cluster -- Use aggregation_func to compute a summary statistics for each median series -- Select scenarios for which `filter_func` returns true - -If outcomes is a 3-dimensional array of scenario outcomes: -- Computes a median series for each outcome cluster -- Use aggregation_func to compute a summary statistics for each median series -- Select scenarios for which `filter_func` returns true for each matrix of outcomes -- Select scenarios that were selected for all outcomes - -# Arguments -- `outcomes` : Outcomes for one or more scenario metrics -- `clusters` : Clusters for one or more scenario metric outcomes -- `filter_funcs` : Function used to filter/target clusters -- `aggregation_function` : Function used to aggregate each median temporal series into a - single number (default is temporal_variability) - -# Returns -BitVector with true/false for selected/not selected scenarios - -# Examples -```julia -metrics = [ - ADRIA.metrics.scenario_total_cover, - ADRIA.metrics.scenario_asv -] - -# Get outcomes -outcomes = ADRIA.metrics.scenario_outcomes(rs, metrics) -num_clusters = 6 - -# Cluster scenarios based on outcomes -outcomes_clusters = ADRIA.analysis.cluster_scenarios(outcomes, num_clusters) - -# Find scenarios above 0.25-quantile for 
all metrics
-robustness_func(x) = x .>= quantile(x, 0.25)
-robust_scens = ADRIA.analysis.find_scenarios(outcomes, outcomes_clusters, robustness_func)
-
-# Find scenarios in the three highest clusters for all metrics
-highest_clusters(x) = x .>= x .∈ [sort(x; rev=true)[1:3]]
-high_scens = ADRIA.analysis.find_scenarios(outcomes, outcomes_clusters, highest_clusters)
-```
-"""
-function find_scenarios(
-    outcomes::AbstractMatrix{<:Real},
-    clusters::AbstractVector{Int64},
-    filter_func::Function;
-    aggregation_func::Function=temporal_variability,
-)::BitVector
-    clusters_summary::Vector{Float64} = zeros(length(unique(clusters)))
-
-    for (idx_c, c) in enumerate(unique(clusters))
-        cluster_metric = outcomes[:, clusters.==c]
-
-        # Median series for current cluster
-        tf = axes(cluster_metric, :timesteps)
-        timesteps_slices = JuliennedArrays.Slices(cluster_metric[timesteps=tf], 2)
-        median_series = median.(timesteps_slices)
-
-        # Summary statistics for that cluster metric
-        clusters_summary[idx_c] = aggregation_func(median_series)
-    end
-
-    return clusters .∈ [unique(clusters)[filter_func(clusters_summary)]]
-end
-function find_scenarios(
-    outcomes::AbstractArray{<:Real,3},
-    clusters::AbstractMatrix{Int64},
-    filter_funcs::Vector{Function};
-    aggregation_func::Function=temporal_variability,
-)::BitVector
-    scenarios = trues(size(clusters, 1))
-
-    # Find scenarios for each clustered outcomes
-    for (idx, clust) in enumerate(eachcol(clusters))
-        scenarios =
-            scenarios .& find_scenarios(
-                outcomes[:, :, idx],
-                collect(clust),
-                filter_funcs[idx];
-                aggregation_func=aggregation_func,
-            )
-    end
-
-    return scenarios
-end
-function find_scenarios(
-    outcomes::AbstractArray{<:Real,3},
-    clusters::AbstractMatrix{Int64},
-    filter_func::Function;
-    aggregation_func::Function=temporal_variability,
-)::BitVector
-    filter_funcs::Vector{Function} = fill(filter_func, size(clusters, 2))
-    return find_scenarios(
-        outcomes, clusters, filter_funcs; aggregation_func=aggregation_func
-    )
-end
+using Distances
+using Clustering
+using FLoops
+using ADRIA
+using JuliennedArrays
+
+"""
+    _complexity(x::AbstractMatrix{<:Real})::Vector{Float64}
+
+Compute Complexity (CE) of a Matrix `x` of shape \$T ⋅ S\$, where \$T\$ is total number of
+time steps and \$S\$ is number of scenarios.
+
+# Arguments
+- `x` : series matrix of shape \$T ⋅ S\$
+
+# Returns
+Vector of \$S\$ elements
+"""
+function _complexity(x::AbstractMatrix{<:Real})::Vector{Float64}
+    return vec(sqrt.(sum(diff(Matrix(x); dims=1) .^ 2; dims=1)) .+ 1)
+end
+
+"""
+    correction_factor(ce_i::T, ce_j::T)::Float64 where {T<:Real}
+
+Compute Correction Factor (CF) between two time series complexities `ce_i` and `ce_j`.
+
+# Arguments
+- `ce_i` : Complexity of time series `i`
+- `ce_j` : Complexity of time series `j`
+
+# Returns
+Float64
+
+# Examples
+```julia
+ce = _complexity([[1.0, 2.0, 3.0] [1.0, 3.0, 4.0]])
+correction_factor(ce[1], ce[2])  # ≈ 1.34
+```
+"""
+function correction_factor(ce_i::T, ce_j::T)::Float64 where {T<:Real}
+    return max(ce_i, ce_j) / min(ce_i, ce_j)
+end
+
+"""
+    complexity_invariance_distance(data::AbstractMatrix{<:Real}; distance=:euclidean)::AbstractMatrix{Float64}
+
+Compute Complexity Invariance Distance (CID) between every pair of matrix columns in
+`data`, of shape \$T ⋅ S\$. The distance between every two series is the Euclidean (or
+weighted Euclidean) distance multiplied by the correction factor, which takes into account
+the ratio between the two series' complexities. Returns a matrix of distances (\$S ⋅ S\$).
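For reference, the quantities defined in this file compose as follows (notation matches the docstrings: $T$ time steps, $d$ the chosen distance, and $\mathrm{CE}$ as computed by `_complexity`):

$$\mathrm{CE}(x) = 1 + \sqrt{\sum_{t=1}^{T-1} (x_{t+1} - x_t)^2}, \qquad \mathrm{CID}(x, y) = d(x, y) \cdot \frac{\max(\mathrm{CE}(x), \mathrm{CE}(y))}{\min(\mathrm{CE}(x), \mathrm{CE}(y))}$$

so two series keep their raw distance only when they are equally complex; otherwise the distance is inflated by the ratio of their complexities.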
+
+# Arguments
+- `data` : Matrix of \$T ⋅ S\$, where \$T\$ is total number of time steps and \$S\$ is
+number of scenarios
+- `distance` : Switch between Euclidean (`:euclidean`) or weighted Euclidean (`:weuclidean`)
+distance measurements. Defaults to `:euclidean`
+
+# Returns
+Matrix of complexity invariance distances.
+"""
+function complexity_invariance_distance(
+    data::AbstractMatrix{<:Real};
+    distance=:euclidean
+)::AbstractMatrix{Float64}
+    # Compute complexity vector
+    complexity = _complexity(data)
+
+    # Create empty Matrix
+    data_size = size(data, 2)
+    cid_matrix::AbstractMatrix{Float64} = zeros(data_size, data_size)
+
+    local weights::Vector{Float64}
+    if distance == :weuclidean
+        # sqrt.([1, 1/2, 1/3, ..., 1/T])
+        weights = sqrt.(1 ./ (1:size(data, 1)))
+    end
+    dist_fn(x, y) = (distance == :euclidean) ? euclidean(x, y) : weuclidean(x, y, weights)
+
+    #? Do we want to normalize the amplitudes of all series?
+    # Iterate over data matrix to compute CID (Complexity Invariance Distance)
+    for i in axes(data, 2)
+        @floop for j in axes(data, 2)
+            ed = dist_fn(data[:, i], data[:, j])
+            cf = correction_factor(complexity[i], complexity[j])
+            cid_matrix[i, j] = cid_matrix[j, i] = ed * cf
+        end
+    end
+
+    return cid_matrix
+end
+
+"""
+    cluster_series(data::AbstractMatrix{<:Real}, n_clusters::Int64; method::Symbol=:kmedoids, distance::Symbol=:euclidean)::Vector{Int64}
+
+Cluster \$S\$ scenarios with \$T\$ time steps each, using k-medoids or hierarchical
+clustering.
+
+# Arguments
+- `data` : Matrix of \$T ⋅ S\$, where \$T\$ is total number of time steps and \$S\$ is
+    number of scenarios
+- `n_clusters` : Number of clusters determined _a priori_
+- `method` : Clustering method. Defaults to `:kmedoids`
+- `distance` : Switch between Euclidean (`:euclidean`) or weighted Euclidean (`:weuclidean`)
+distance measurements. Defaults to `:euclidean`
+
+# Returns
+- Cluster ids indicating each scenario cluster assignment.
+
+# References
+1. Steinmann, P., Auping, W.L., Kwakkel, J.H., 2020.
+   Behavior-based scenario discovery using time series clustering.
+   Technological Forecasting and Social Change 156, 120052.
+   https://doi.org/10.1016/j.techfore.2020.120052
+
+2. Batista, G.E.A.P.A., Keogh, E.J., Tataw, O.M., de Souza, V.M.A., 2014.
+   CID: an efficient complexity-invariant distance for time series.
+   Data Min Knowl Disc 28, 634-669.
+   https://doi.org/10.1007/s10618-013-0312-3
+"""
+function cluster_series(
+    data::AbstractMatrix{<:Real},
+    n_clusters::Int64;
+    method::Symbol=:kmedoids,
+    distance::Symbol=:euclidean
+)::Vector{Int64}
+    # Calculate distances matrix
+    distances = complexity_invariance_distance(data; distance=distance)
+
+    if method == :kmedoids
+        return kmedoids(distances, n_clusters).assignments
+    end
+
+    # Return hierarchical clustering with n_clusters
+    dendrogram = hclust(distances; linkage=:average)
+    return cutree(dendrogram; k=n_clusters)
+end
+
+"""
+    cluster_scenarios(data::AbstractArray{<:Real}, n_clusters::Int64; method::Symbol=:kmedoids, distance::Symbol=:euclidean)::Array{Int64}
+
+Alias of `cluster_series`.
+
+# Arguments
+- `data` : Matrix of \$T ⋅ S\$, where \$T\$ is total number of time steps and \$S\$ is
+    number of scenarios
+- `n_clusters` : Number of clusters determined _a priori_
+- `method` : Clustering method. Defaults to `:kmedoids`
+- `distance` : Switch between Euclidean (`:euclidean`) or weighted Euclidean (`:weuclidean`)
+distance measurements. Defaults to `:euclidean`
+
+# Returns
+- Cluster ids indicating each scenario cluster assignment.
+
+# Examples
+One can cluster scenarios based on a single Metric, passing a Matrix of outcomes for each
+timestep and scenario:
+
+```julia
+# Matrix of outcomes
+s_tac = ADRIA.metrics.scenario_total_cover(rs)
+
+# Cluster scenarios
+n_clusters = 6
+clusters = ADRIA.analysis.cluster_series(s_tac, n_clusters)
+```
+
+One can also perform multiple clusterings, based on multiple Metrics, by passing a
+3-dimensional Array (or YAXArray) of outcomes for each timestep, scenario and Metric:
+
+```julia
+metrics::Vector{ADRIA.metrics.Metric} = [
+    ADRIA.metrics.scenario_total_cover,
+    ADRIA.metrics.scenario_asv,
+    ADRIA.metrics.scenario_absolute_juveniles,
+]
+
+# 3-dimensional array of outcomes
+outcomes = ADRIA.metrics.scenario_outcomes(rs, metrics)
+
+# Cluster scenarios
+num_clusters = 6
+outcomes_clusters = ADRIA.analysis.cluster_scenarios(outcomes, num_clusters)
+```
+"""
+function cluster_scenarios(
+    data::AbstractArray{<:Real},
+    n_clusters::Int64;
+    method::Symbol=:kmedoids,
+    distance::Symbol=:euclidean
+)::Array{Int64}
+    ndims(data) == 2 && return cluster_series(data, n_clusters)
+
+    _, n_scenarios, n_metrics = size(data)
+
+    clusters = zeros(Int64, n_scenarios, n_metrics)
+    for m in 1:n_metrics
+        clusters[:, m] = cluster_series(
+            data[:, :, m], n_clusters; method=method, distance=distance
+        )
+    end
+
+    return clusters
+end
+
+"""
+    target_clusters(clusters::Vector{T}, outcomes::AbstractMatrix{<:Real}; metric=temporal_variability, size_limit=0.01) where {T<:Int64}
+
+Cluster scenarios into target and non-target groups, based on the median outcome temporal
+variability of a previous time series clustering.
+
+# Arguments
+- `clusters` : Vector with outcome cluster indexes
+- `outcomes` : AbstractMatrix of scenario outcomes
+- `metric` : Metric used to aggregate outcomes for each cluster
+- `size_limit` : This function will iteratively merge the best cluster with the second best
+    if the fraction of scenarios inside it is below `size_limit`
+
+# Returns
+Vector containing 1's for target and 0's for non-target clusters
+"""
+function target_clusters(
+    clusters::Vector{T},
+    outcomes::AbstractMatrix{<:Real};
+    metric=temporal_variability,
+    size_limit=0.01
+)::Vector{T} where {T<:Int64}
+
+    # Compute statistic for each cluster
+    clusters_statistics::Vector{Float64} = []
+    for cluster in unique(clusters)
+        normalized_outcomes = outcomes[:, clusters .== cluster] ./ maximum(outcomes)
+        statistic = median(metric(normalized_outcomes))
+        push!(clusters_statistics, statistic)
+    end
+
+    target_index = argmax(clusters_statistics)
+    target_indexes = [target_index]
+
+    # Merge the target cluster with the next best while it holds less than
+    # `size_limit` of all scenarios
+    sizes = [size(outcomes[:, clusters .== c], 2) for c in unique(clusters)]
+    target_size = sizes[target_index] / sum(sizes)
+    while target_size < size_limit
+        # Nullify target_index to find the next argmax
+        clusters_statistics[target_index] = 0
+
+        # Find next best cluster and add to target_indexes
+        target_index = argmax(clusters_statistics)
+        push!(target_indexes, target_index)
+
+        # Update target_size with next best cluster size
+        target_size += sizes[target_index] / sum(sizes)
+    end
+
+    # Return new clusters vector with only 1 and 0 for target and non-target clusters
+    return [c ∈ target_indexes ? 1 : 0 for c in clusters]
+end
+
+"""
+    find_scenarios(outcomes::AbstractMatrix{<:Real}, clusters::Vector{Int64}, filter_func::Function; aggregation_func::Function=temporal_variability)::BitVector
+    find_scenarios(outcomes::AbstractArray{<:Real,3}, clusters::AbstractMatrix{Int64}, filter_funcs::Vector{Function}; aggregation_func::Function=temporal_variability)::BitVector
+    find_scenarios(outcomes::AbstractArray{<:Real,3}, clusters::AbstractMatrix{Int64}, filter_func::Function; aggregation_func::Function=temporal_variability)::BitVector
+
+If `outcomes` is a Matrix of scenario outcomes and `clusters` is a Vector of clusters:
+- Compute a median series for each cluster
+- Use `aggregation_func` to compute a summary statistic for each median series
+- Select scenarios for which `filter_func` returns true
+
+If `outcomes` is a 3-dimensional array of scenario outcomes:
+- Compute a median series for each outcome cluster
+- Use `aggregation_func` to compute a summary statistic for each median series
+- Select scenarios for which `filter_func` returns true for each matrix of outcomes
+- Select scenarios that were selected for all outcomes
+
+# Arguments
+- `outcomes` : Outcomes for one or more scenario metrics
+- `clusters` : Clusters for one or more scenario metric outcomes
+- `filter_funcs` : Function(s) used to filter/target clusters
+- `aggregation_func` : Function used to aggregate each median temporal series into a
+    single number (default is temporal_variability)
+
+# Returns
+BitVector with true/false for selected/not selected scenarios
+
+# Examples
+```julia
+metrics = [
+    ADRIA.metrics.scenario_total_cover,
+    ADRIA.metrics.scenario_asv
+]
+
+# Get outcomes
+outcomes = ADRIA.metrics.scenario_outcomes(rs, metrics)
+num_clusters = 6
+
+# Cluster scenarios based on outcomes
+outcomes_clusters = ADRIA.analysis.cluster_scenarios(outcomes, num_clusters)
+
+# Find scenarios above 0.25-quantile for all metrics
+robustness_func(x) = x .>= quantile(x, 0.25)
+robust_scens = ADRIA.analysis.find_scenarios(outcomes, outcomes_clusters, robustness_func)
+
+# Find scenarios in the three highest clusters for all metrics
+highest_clusters(x) = x .∈ [sort(x; rev=true)[1:3]]
+high_scens = ADRIA.analysis.find_scenarios(outcomes, outcomes_clusters, highest_clusters)
+```
+"""
+function find_scenarios(
+    outcomes::AbstractMatrix{<:Real},
+    clusters::AbstractVector{Int64},
+    filter_func::Function;
+    aggregation_func::Function=temporal_variability
+)::BitVector
+    clusters_summary::Vector{Float64} = zeros(length(unique(clusters)))
+
+    for (idx_c, c) in enumerate(unique(clusters))
+        cluster_metric = outcomes[:, clusters .== c]
+
+        # Median series for current cluster
+        tf = axes(cluster_metric, :timesteps)
+        timesteps_slices = JuliennedArrays.Slices(cluster_metric[timesteps=tf], 2)
+        median_series = median.(timesteps_slices)
+
+        # Summary statistics for that cluster metric
+        clusters_summary[idx_c] = aggregation_func(median_series)
+    end
+
+    return clusters .∈ [unique(clusters)[filter_func(clusters_summary)]]
+end
+function find_scenarios(
+    outcomes::AbstractArray{<:Real,3},
+    clusters::AbstractMatrix{Int64},
+    filter_funcs::Vector{Function};
+    aggregation_func::Function=temporal_variability
+)::BitVector
+    scenarios = trues(size(clusters, 1))
+
+    # Find scenarios for each set of clustered outcomes
+    for (idx, clust) in enumerate(eachcol(clusters))
+        scenarios =
+            scenarios .& find_scenarios(
+                outcomes[:, :, idx],
+                collect(clust),
+                filter_funcs[idx];
+                aggregation_func=aggregation_func
+            )
+    end
+
+    return
scenarios +end +function find_scenarios( + outcomes::AbstractArray{<:Real,3}, + clusters::AbstractMatrix{Int64}, + filter_func::Function; + aggregation_func::Function=temporal_variability +)::BitVector + filter_funcs::Vector{Function} = fill(filter_func, size(clusters, 2)) + return find_scenarios( + outcomes, clusters, filter_funcs; aggregation_func=aggregation_func + ) +end diff --git a/src/analysis/intervention.jl b/src/analysis/intervention.jl index e020b3e6e..09d12abe9 100644 --- a/src/analysis/intervention.jl +++ b/src/analysis/intervention.jl @@ -33,7 +33,9 @@ robust_scens = ADRIA.analysis.find_robust(rs, y, rule_func, [45, 60]) # Retrieve seeding intervention frequency for robust scenarios robust_selection_frequencies = ADRIA.analysis.intervention_frequency(rs, robust_scens, :seed) """ -function intervention_frequency(rs::ResultSet, scen_indices::NamedTuple, log_type::Symbol)::YAXArray +function intervention_frequency( + rs::ResultSet, scen_indices::NamedTuple, log_type::Symbol +)::YAXArray log_type ∈ [:seed, :shade, :fog] || ArgumentError("Unsupported log") # Get requested log @@ -44,8 +46,12 @@ function intervention_frequency(rs::ResultSet, scen_indices::NamedTuple, log_typ interv_freq = ZeroDataCube(; T=Float64, locations=rs.site_ids, rcps=rcps) for rcp in rcps # Select scenarios satisfying condition and tally selection for each location - logged_data = dropdims(sum(interv_log[scenarios=scen_indices[rcp]], dims=:coral_id), dims=:coral_id) - interv_freq[rcps=At(rcp)] .= vec(dropdims(sum(logged_data .> 0, dims=(:timesteps, :scenarios)), dims=:timesteps)) + logged_data = dropdims( + sum(interv_log[scenarios=scen_indices[rcp]]; dims=:coral_id); dims=:coral_id + ) + interv_freq[rcps=At(rcp)] .= vec( + dropdims(sum(logged_data .> 0; dims=(:timesteps, :scenarios)); dims=:timesteps) + ) end return interv_freq diff --git a/src/analysis/pareto.jl b/src/analysis/pareto.jl index ac69eaae1..6e960fae1 100644 --- a/src/analysis/pareto.jl +++ b/src/analysis/pareto.jl @@ -36,7 +36,9 @@ optimal = ADRIA.analysis.find_pareto_optimal(rs, y, [45, 60]) # (RCP45 = [13, 48, 54, 65, 95], RCP60 = [274, 315, 356, 430, 455]) ``` """ -function find_pareto_optimal(scens::DataFrame, y::AbstractArray, rcps::Vector{Int}; offset::Int=0)::NamedTuple +function find_pareto_optimal( + scens::DataFrame, y::AbstractArray, rcps::Vector{Int}; offset::Int=0 +)::NamedTuple x_idx = [scens.RCP .== rcp for rcp in rcps] r_rcp = [reduce(vcat, nds(y[rcp_idx, :], offset)) for rcp_idx in x_idx] @@ -44,7 +46,9 @@ function find_pareto_optimal(scens::DataFrame, y::AbstractArray, rcps::Vector{In return NamedTuple{Tuple(Symbol.("RCP" .* string.(rcps)))}(scen_ids) end -function find_pareto_optimal(rs::ResultSet, y::AbstractArray, rcps::Vector; offset::Int=0)::NamedTuple +function find_pareto_optimal( + rs::ResultSet, y::AbstractArray, rcps::Vector; offset::Int=0 +)::NamedTuple return find_pareto_optimal(rs.inputs, y, rcps; offset=offset) end @@ -88,7 +92,9 @@ robust = ADRIA.analysis.find_robust(rs, y, rule_func, [45, 60]) # (RCP45 = [13, 65], RCP60 = [274, 455]) ``` """ -function find_robust(scens::DataFrame, y::AbstractArray, rule, rcps::Vector{Int64}; offset::Int64=0)::NamedTuple +function find_robust( + scens::DataFrame, y::AbstractArray, rule, rcps::Vector{Int64}; offset::Int64=0 +)::NamedTuple y = col_normalize(copy(y)) opt = find_pareto_optimal(scens, y, rcps; offset=offset) @@ -107,6 +113,8 @@ function find_robust(scens::DataFrame, y::AbstractArray, rule, rcps::Vector{Int6 return NamedTuple{keys(opt)}(vals) end -function 
find_robust(rs::ResultSet, y::AbstractArray, rule, rcps::Vector{Int64}; offset::Int64=0)::NamedTuple +function find_robust( + rs::ResultSet, y::AbstractArray, rule, rcps::Vector{Int64}; offset::Int64=0 +)::NamedTuple return find_robust(rs.inputs, y, rule, rcps; offset=offset) end diff --git a/src/analysis/rule_extraction.jl b/src/analysis/rule_extraction.jl index d054407f0..a2975e2e5 100644 --- a/src/analysis/rule_extraction.jl +++ b/src/analysis/rule_extraction.jl @@ -27,8 +27,12 @@ See also [`Rule`](@ref). # Returns Vector{ADRIA.analysis.Rule{Vector{Float64}, Vector{Vector}}} """ -function rules(rules::SIRUS.StableRules{Int64})::Vector{Rule{Vector{Vector},Vector{Float64}}} - [Rule(_condition(rules, i), _consequent(rules, i)) for i in eachindex(rules.rules)] +function rules( + rules::SIRUS.StableRules{Int64} +)::Vector{Rule{Vector{Vector},Vector{Float64}}} + return [ + Rule(_condition(rules, i), _consequent(rules, i)) for i in eachindex(rules.rules) + ] end """ @@ -88,7 +92,8 @@ compact form. - `rules` : Vector of Rule objects """ function print_rules(rules::Vector{Rule{Vector{Vector},Vector{Float64}}})::Nothing - condition(rule) = [c[2] == :L ? "$(c[1]) < $(c[3])" : "$(c[1]) ≥ $(c[3])" for c in rule.condition] + condition(rule) = + [c[2] == :L ? "$(c[1]) < $(c[3])" : "$(c[1]) ≥ $(c[3])" for c in rule.condition] consequent(rule) = " then $(rule.consequent[1]) else $(rule.consequent[2])\n" rule_string(rule) = "if " * join(condition(rule), " & ") * consequent(rule) print(join([rule_string(rule) for rule in rules])) @@ -96,7 +101,6 @@ function print_rules(rules::Vector{Rule{Vector{Vector},Vector{Float64}}})::Nothi return nothing end - """ cluster_rules(clusters::Vector{T}, X::DataFrame, max_rules::T; seed::Int64=123, kwargs...) where {T<:Integer,F<:Real} cluster_rules(clusters::Union{BitVector,Vector{Bool}}, X::DataFrame, max_rules::T; kwargs...) where {T<:Int64} @@ -159,5 +163,5 @@ Sum of biggest probabilities for each rule consequent - `rules` : Vector of Rule objects """ function maximum_probability(rules::Vector{Rule{Vector{Vector},Vector{Float64}}}) - sum([maximum(rule.consequent) for rule in rules]) + return sum([maximum(rule.consequent) for rule in rules]) end diff --git a/src/analysis/sensitivity.jl b/src/analysis/sensitivity.jl index 30657b702..fb18b8bfd 100644 --- a/src/analysis/sensitivity.jl +++ b/src/analysis/sensitivity.jl @@ -156,7 +156,7 @@ function pawn( X::AbstractMatrix{<:Real}, y::AbstractVector{<:Real}, factor_names::Vector{String}; - S::Int64=10, + S::Int64=10 )::YAXArray N, D = size(X) step = 1 / S @@ -174,13 +174,13 @@ function pawn( X_di = @view(X[:, d_i]) X_q .= quantile(X_di, seq) - Y_sel = @view(y[X_q[1].<=X_di.<=X_q[2]]) + Y_sel = @view(y[X_q[1] .<= X_di .<= X_q[2]]) if length(Y_sel) > 0 pawn_t[1, d_i] = ks_statistic(ApproximateTwoSampleKSTest(Y_sel, y)) end for s in 2:S - Y_sel = @view(y[X_q[s]. 1 && D > 1 msg::String = string( "The current implementation of PAWN can only assess a single quantity", - " of interest at a time.", + " of interest at a time." 
) throw(ArgumentError(msg)) end @@ -283,7 +283,7 @@ function convergence( y::YAXArray, target_factors::Vector{Symbol}; Si::Function=pawn, - n_steps::Int64=10, + n_steps::Int64=10 )::YAXArray N = length(y.scenarios) step_size = floor(Int64, N / n_steps) @@ -293,14 +293,16 @@ function convergence( T=Float64, factors=target_factors, Si=[:min, :lb, :mean, :median, :ub, :max, :std, :cv], - n_scenarios=N_it, + n_scenarios=N_it ) scens_idx = randperm(N) for nn in N_it - pawn_store[n_scenarios=At(nn)] .= Si(X[scens_idx[1:nn], :], Array(y[scens_idx[1:nn]]))[ - factors=At(target_factors) - ] + pawn_store[n_scenarios=At(nn)] .= Si( + X[scens_idx[1:nn], :], Array(y[scens_idx[1:nn]]) + )[ + factors=At(target_factors) +] end return pawn_store @@ -311,12 +313,12 @@ function convergence( y::YAXArray, components::Vector{Symbol}; Si::Function=pawn, - n_steps::Int64=10, + n_steps::Int64=10 )::YAXArray ms = model_spec(rs) target_factors = [ - ms[ms[:, "component"].==cc, "fieldname"] for + ms[ms[:, "component"] .== cc, "fieldname"] for cc in string.(components) ] @@ -327,7 +329,7 @@ function convergence( T=Float64, factors=components, Si=collect(Si_n.Si), - n_scenarios=collect(Si_n.n_scenarios), + n_scenarios=collect(Si_n.n_scenarios) ) for (cc, factors) in zip(components, target_factors) @@ -385,7 +387,7 @@ function tsa(X::DataFrame, y::AbstractMatrix{<:Real})::YAXArray T=Float64, factors=Symbol.(names(X)), Si=[:min, :lb, :mean, :median, :ub, :max, :std, :cv], - timesteps=ts, + timesteps=ts ) for t in axes(y, 1) @@ -471,46 +473,46 @@ function rsa( sel = trues(N) foi_spec::DataFrame = _get_factor_spec(model_spec, factors) - unordered_cat = foi_spec.fieldname[foi_spec.ptype.=="unordered categorical"] + unordered_cat = foi_spec.fieldname[foi_spec.ptype .== "unordered categorical"] seq_store::Dict{Symbol,Vector{Float64}} = Dict() # storage for bin sequences # Get unique bin sequences for unordered categorical variables and store for factor in unordered_cat - S_temp = _category_bins(foi_spec[foi_spec.fieldname.==factor, :]) - seq_store[factor] = collect(0.0:(1/S_temp):1.0) + S_temp = _category_bins(foi_spec[foi_spec.fieldname .== factor, :]) + seq_store[factor] = collect(0.0:(1 / S_temp):1.0) end # Other variables have default sequence using input S - seq_store[:default] = collect(0.0:(1/S):1.0) + seq_store[:default] = collect(0.0:(1 / S):1.0) default_ax = (Dim{:default}(seq_store[:default][2:end]),) # YAXArray storage for unordered categorical variables yax_store_cat = Tuple(( YAXArray( (Dim{fact_t}(seq_store[fact_t][2:end]),), - zeros(Union{Missing,Float64}, (length(seq_store[fact_t][2:end]))), + zeros(Union{Missing,Float64}, (length(seq_store[fact_t][2:end]))) ) for fact_t in unordered_cat )) # YAXArray storage for other variables yax_store_default = Tuple( YAXArray( default_ax, zeros(Union{Missing,Float64}, (length(seq_store[:default][2:end]))) - ) for _ in 1:(length(factors)-length(unordered_cat)) + ) for _ in 1:(length(factors) - length(unordered_cat)) ) # Create storage NamedTuples for unordered categorical variables and other variables, then merge r_s_default = NamedTuple( zip( - Tuple(foi_spec.fieldname[foi_spec.ptype.!="unordered categorical"]), - yax_store_default, - ), + Tuple(foi_spec.fieldname[foi_spec.ptype .!= "unordered categorical"]), + yax_store_default + ) ) r_s_cat = NamedTuple(zip(Tuple(unordered_cat), yax_store_cat)) r_s = merge(r_s_cat, r_s_default) for fact_t in factors f_ind = foi_spec.fieldname .== fact_t - ptype::String = foi_spec.ptype[foi_spec.fieldname.==fact_t][1] + ptype::String = 
foi_spec.ptype[foi_spec.fieldname .== fact_t][1] X_i .= X[:, fact_t] @@ -530,8 +532,8 @@ function rsa( r_s[fact_t][1] = KSampleADTest(y[sel], y[Not(sel)]).A²k end - for s in 2:(length(X_q)-1) - sel .= X_q[s] .< X_i .<= X_q[s+1] + for s in 2:(length(X_q) - 1) + sel .= X_q[s] .< X_i .<= X_q[s + 1] if count(sel) == 0 || length(y[Not(sel)]) == 0 || length(unique(y[sel])) == 1 # not enough samples, or inactive area of factor space r_s[fact_t][s] = missing @@ -558,8 +560,8 @@ function rsa( return rsa( rs.inputs[!, Not(:RCP)][!, factors], y, - rs.model_spec[rs.model_spec.fieldname.∈[factors], :]; - S=S, + rs.model_spec[rs.model_spec.fieldname .∈ [factors], :]; + S=S ) end @@ -615,7 +617,7 @@ function outcome_map( model_spec::DataFrame; S::Int64=10, n_boot::Int64=100, - conf::Float64=0.95, + conf::Float64=0.95 )::YAXArray if !all(target_factors .∈ [model_spec.fieldname]) missing_factor = .!(target_factors .∈ [model_spec.fieldname]) @@ -629,13 +631,13 @@ function outcome_map( S = _category_bins(foi_spec[is_cat, :]) end - steps = collect(0.0:(1/S):1.0) + steps = collect(0.0:(1 / S):1.0) p_table = DataCube( zeros(Union{Missing,Float64}, length(steps) - 1, length(target_factors), 3); bins=string.(steps[2:end]), factors=Symbol.(target_factors), - CI=[:mean, :lower, :upper], + CI=[:mean, :lower, :upper] ) all_p_rule = _map_outcomes(y, rule) @@ -652,21 +654,21 @@ function outcome_map( X_q = zeros(S + 1) for (j, fact_t) in enumerate(target_factors) X_f = X[:, fact_t] - ptype = model_spec.ptype[model_spec.fieldname.==fact_t][1] + ptype = model_spec.ptype[model_spec.fieldname .== fact_t][1] if occursin("categorical", ptype) X_q .= _get_cat_quantile(foi_spec, fact_t, steps) else S = S_default steps = steps_default - X_q[1:(S+1)] .= quantile(X_f, steps) + X_q[1:(S + 1)] .= quantile(X_f, steps) end - for i in 1:length(X_q[1:(end-1)]) + for i in 1:length(X_q[1:(end - 1)]) local b::BitVector if i == 1 - b = (X_q[i] .<= X_f .<= X_q[i+1]) + b = (X_q[i] .<= X_f .<= X_q[i + 1]) else - b = (X_q[i] .< X_f .<= X_q[i+1]) + b = (X_q[i] .< X_f .<= X_q[i + 1]) end b = b .& behave @@ -692,7 +694,7 @@ function outcome_map( rule::Union{Function,BitVector,Vector{Int64}}; S::Int64=20, n_boot::Int64=100, - conf::Float64=0.95, + conf::Float64=0.95 )::YAXArray return outcome_map(X, y, rule, names(X); S, n_boot, conf) end @@ -703,7 +705,7 @@ function outcome_map( target_factors::Vector{Symbol}; S::Int64=20, n_boot::Int64=100, - conf::Float64=0.95, + conf::Float64=0.95 )::YAXArray return outcome_map( rs.inputs[:, Not(:RCP)], y, rule, target_factors, rs.model_spec; S, n_boot, conf @@ -715,7 +717,7 @@ function outcome_map( rule::Union{Function,BitVector,Vector{Int64}}; S::Int64=20, n_boot::Int64=100, - conf::Float64=0.95, + conf::Float64=0.95 )::YAXArray return outcome_map( rs.inputs[:, Not(:RCP)], y, rule, names(rs.inputs), rs.model_spec; S, n_boot, conf diff --git a/src/decision/Criteria/DecisionPreferences.jl b/src/decision/Criteria/DecisionPreferences.jl index 6493f163a..2b3e86103 100644 --- a/src/decision/Criteria/DecisionPreferences.jl +++ b/src/decision/Criteria/DecisionPreferences.jl @@ -216,7 +216,7 @@ function apply_threshold( target_criteria = dm.criteria .== criteria_name target_vals = dm.data[:, target_criteria] - valid_locs = vec(threshold[1] .<= target_vals .<= (threshold[1]+threshold[2])) + valid_locs = vec(threshold[1] .<= target_vals .<= (threshold[1] + threshold[2])) return dm[location=valid_locs] end diff --git a/src/decision/Criteria/DecisionWeights.jl b/src/decision/Criteria/DecisionWeights.jl index 
3d3b7aa3c..2d9cd2e31 100644 --- a/src/decision/Criteria/DecisionWeights.jl +++ b/src/decision/Criteria/DecisionWeights.jl @@ -4,7 +4,6 @@ using Distributions, Statistics abstract type DecisionWeights end abstract type DecisionThresholds end - """ DepthThresholds <: DecisionWeights @@ -17,7 +16,7 @@ Base.@kwdef struct DepthThresholds <: DecisionThresholds dist=DiscreteOrderedUniformDist, dist_params=(2.0, 5.0, 0.5), # Half metre intervals name="Minimum Depth", - description="Minimum depth for a site to be included for consideration.\nNote: This value will be replaced with the shallowest depth value found if all sites are found to be deeper than `depth_min + depth_offset`.", + description="Minimum depth for a site to be included for consideration.\nNote: This value will be replaced with the shallowest depth value found if all sites are found to be deeper than `depth_min + depth_offset`." ) depth_offset::Param = Factor( 10.0; @@ -25,7 +24,7 @@ Base.@kwdef struct DepthThresholds <: DecisionThresholds dist=DiscreteOrderedUniformDist, dist_params=(10.0, 25.0, 0.5), # Half metre intervals name="Depth Offset", - description="Offset from minimum depth, used to indicate maximum depth.", + description="Offset from minimum depth, used to indicate maximum depth." ) end diff --git a/src/decision/Criteria/FogCriteria.jl b/src/decision/Criteria/FogCriteria.jl index 8011489ef..1c2035e39 100644 --- a/src/decision/Criteria/FogCriteria.jl +++ b/src/decision/Criteria/FogCriteria.jl @@ -4,59 +4,59 @@ Criteria weights for fogging interventions. """ Base.@kwdef struct FogCriteriaWeights <: DecisionWeights - fog_heat_stress::Param= Factor( + fog_heat_stress::Param = Factor( 1.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=minimum, name="Fog Heat Stress", - description="Preference locations with lower heat stress for fogging.", + description="Preference locations with lower heat stress for fogging." ) - fog_wave_stress::Param= Factor( + fog_wave_stress::Param = Factor( 1.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=minimum, name="Fog Wave Stress", - description="Preference locations with lower wave activity for fogging.", + description="Preference locations with lower wave activity for fogging." ) - fog_in_connectivity::Param= Factor( + fog_in_connectivity::Param = Factor( 1.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=maximum, name="Incoming Connectivity (Fog)", - description="Give preference to locations with high incoming connectivity (i.e., receives larvae from other sites) for fogging deployments.", + description="Give preference to locations with high incoming connectivity (i.e., receives larvae from other sites) for fogging deployments." ) - fog_out_connectivity::Param= Factor( + fog_out_connectivity::Param = Factor( 1.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=maximum, name="Outgoing Connectivity (Fog)", - description="Give preference to locations with high outgoing connectivity (i.e., provides larvae to other sites) for fogging deployments.", + description="Give preference to locations with high outgoing connectivity (i.e., provides larvae to other sites) for fogging deployments." ) - fog_depth::Param= Factor( + fog_depth::Param = Factor( 1.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=minimum, name="Depth (Fog)", - description="Give preference to shallower locations for fogging deployments.", + description="Give preference to shallower locations for fogging deployments." 
) - fog_coral_cover::Param= Factor( + fog_coral_cover::Param = Factor( 0.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=maximum, name="Fog Coral Cover", - description="Higher values give preference to sites with high coral cover for fogging deployments.", + description="Higher values give preference to sites with high coral cover for fogging deployments." ) # Disabled as they are currently unnecessary # fog_priority::Param= Factor( @@ -80,7 +80,8 @@ Base.@kwdef struct FogCriteriaWeights <: DecisionWeights end # Alias default constructor -FogPreferences(names, criteria, directions) = DecisionPreferences(names, criteria, directions) +FogPreferences(names, criteria, directions) = + DecisionPreferences(names, criteria, directions) function FogPreferences( dom, params::YAXArray diff --git a/src/decision/Criteria/SRMCriteria.jl b/src/decision/Criteria/SRMCriteria.jl index e6e17991e..653adc1ce 100644 --- a/src/decision/Criteria/SRMCriteria.jl +++ b/src/decision/Criteria/SRMCriteria.jl @@ -4,59 +4,59 @@ Weights for shading (Solar Radiation Management) interventions. """ Base.@kwdef struct SRMCriteriaWeights <: DecisionWeights - srm_heat_stress::Param= Factor( + srm_heat_stress::Param = Factor( 1.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=minimum, name="Shade Heat Stress", - description="Preference locations with lower heat stress for SRM.", + description="Preference locations with lower heat stress for SRM." ) - srm_wave_stress::Param= Factor( + srm_wave_stress::Param = Factor( 1.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=minimum, name="Shade Wave Stress", - description="Prefer locations with lower wave stress for SRM.", + description="Prefer locations with lower wave stress for SRM." ) - srm_connectivity::Param= Factor( + srm_connectivity::Param = Factor( 0.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=maximum, name="SRM Connectivity", - description="Preference locations with higher outgoing connectivity for SRM.", + description="Preference locations with higher outgoing connectivity for SRM." ) - srm_coral_cover::Param= Factor( + srm_coral_cover::Param = Factor( 0.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=maximum, name="Coral Cover (SRM)", - description="Give greater weight to locations with higher coral cover for SRM.", + description="Give greater weight to locations with higher coral cover for SRM." ) - srm_priority::Param= Factor( + srm_priority::Param = Factor( 0.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=maximum, name="Predecessor Priority (SRM)", - description="Relative importance of locations with higher outgoing connectivity to priority locations.", + description="Relative importance of locations with higher outgoing connectivity to priority locations." ) - srm_zone::Param= Factor( + srm_zone::Param = Factor( 0.0; ptype="continuous", dist=Uniform, dist_params=(0.0, 1.0), direction=maximum, name="Zone Predecessor (SRM)", - description="Relative importance of locations with higher outgoing connectivity to priority (target) zones.", + description="Relative importance of locations with higher outgoing connectivity to priority (target) zones." 
) end @@ -65,5 +65,7 @@ function SRMPreferences( )::DecisionPreferences w::DataFrame = component_params(dom.model, SRMCriteriaWeights) - return DecisionPreferences(string.(w.fieldname), params[At(string.(w.fieldname))], w.direction) + return DecisionPreferences( + string.(w.fieldname), params[At(string.(w.fieldname))], w.direction + ) end diff --git a/src/decision/Criteria/SeedCriteria.jl b/src/decision/Criteria/SeedCriteria.jl index 54e3de4eb..bf5bc088a 100644 --- a/src/decision/Criteria/SeedCriteria.jl +++ b/src/decision/Criteria/SeedCriteria.jl @@ -11,7 +11,7 @@ Base.@kwdef struct SeedCriteriaWeights <: DecisionWeights dist_params=(0.8, 1.0), direction=minimum, name="Seed Heat Stress", - description="Importance of avoiding heat stress when seeding. Prefer locations with lower heat stress.", + description="Importance of avoiding heat stress when seeding. Prefer locations with lower heat stress." ) seed_wave_stress::Param = Factor( 0.3; @@ -20,7 +20,7 @@ Base.@kwdef struct SeedCriteriaWeights <: DecisionWeights dist_params=(0.0, 1.0), direction=maximum, name="Seed Wave Stress", - description="Prefer locations with higher wave activity.", + description="Prefer locations with higher wave activity." ) seed_in_connectivity::Param = Factor( 0.85; @@ -29,7 +29,7 @@ Base.@kwdef struct SeedCriteriaWeights <: DecisionWeights dist_params=(0.5, 1.0), direction=maximum, name="Incoming Connectivity (Seed)", - description="Give preference to locations with high incoming connectivity (i.e., receives larvae from other sites) for coral deployments.", + description="Give preference to locations with high incoming connectivity (i.e., receives larvae from other sites) for coral deployments." ) seed_out_connectivity::Param = Factor( 0.90; @@ -38,7 +38,7 @@ Base.@kwdef struct SeedCriteriaWeights <: DecisionWeights dist_params=(0.5, 1.0), direction=maximum, name="Outgoing Connectivity (Seed)", - description="Give preference to locations with high outgoing connectivity (i.e., provides larvae to other sites) for coral deployments.", + description="Give preference to locations with high outgoing connectivity (i.e., provides larvae to other sites) for coral deployments." ) seed_depth::Param = Factor( 0.95; @@ -47,7 +47,7 @@ Base.@kwdef struct SeedCriteriaWeights <: DecisionWeights dist_params=(0.8, 1.0), direction=maximum, name="Depth (Seed)", - description="Give preference to deeper locations for coral deployments.", + description="Give preference to deeper locations for coral deployments." ) seed_coral_cover::Param = Factor( 0.7; @@ -56,7 +56,7 @@ Base.@kwdef struct SeedCriteriaWeights <: DecisionWeights dist_params=(0.0, 1.0), direction=minimum, name="Seed Coral Cover", - description="Preference locations with lower coral cover (higher available space) for seeding deployments.", + description="Preference locations with lower coral cover (higher available space) for seeding deployments." ) # Disabled as they are currently unnecessary # seed_priority::Param = Factor( @@ -257,8 +257,8 @@ function disperse_locations( # Count the number of times each selected cluster appears # then identify clusters that breach the max membership rule selected_clusters, cluster_frequency, - rule_violators_idx, exceeded_clusters, - potential_alternatives = _update_state(cluster_ids, num_locs, max_members) + rule_violators_idx, exceeded_clusters, + potential_alternatives = _update_state(cluster_ids, num_locs, max_members) # If no cluster breaches the rule, then nothing to do! 
if length(rule_violators_idx) == 0 @@ -283,7 +283,7 @@ function disperse_locations( # Identify viable clusters that do not breach the rule alternates = vcat( fill(false, num_locs), - (cluster_ids .∈ Ref(potential_alternatives))[num_locs+1:end] + (cluster_ids .∈ Ref(potential_alternatives))[(num_locs + 1):end] ) if count(alternates) == 0 @@ -294,7 +294,7 @@ function disperse_locations( # Swap the worst/lowest location for the cluster that breaches the rule # for the next best location - worst_loc_idx = findlast(selected_clusters.==rule_violator) + worst_loc_idx = findlast(selected_clusters .== rule_violator) next_best_loc_idx = findfirst(alternates .> 0) # Swap selection for corresponding location, cluster ids and available space @@ -312,12 +312,15 @@ function disperse_locations( # Swapping out a location may include a location with not much space # so we reconsider how many locations we need - num_locs = max(findfirst(>=(area_to_seed), cumsum(available_space[ranked_locs])), n_iv_locs) + num_locs = max( + findfirst(>=(area_to_seed), cumsum(available_space[ranked_locs])), + n_iv_locs + ) # Update state selected_clusters, cluster_frequency, - rule_violators_idx, exceeded_clusters, - potential_alternatives = _update_state(cluster_ids, num_locs, max_members) + rule_violators_idx, exceeded_clusters, + potential_alternatives = _update_state(cluster_ids, num_locs, max_members) end if length(exceeded_clusters) == 0 @@ -358,5 +361,7 @@ function _update_state(cluster_ids::Vector, num_locs::Int64, max_members::Int64) exceeded_clusters = collect(keys(cluster_frequency))[rule_violators_idx] potential_alternatives = cluster_ids[cluster_ids .∉ Ref(exceeded_clusters)] - return selected_clusters, cluster_frequency, rule_violators_idx, exceeded_clusters, potential_alternatives + return selected_clusters, + cluster_frequency, rule_violators_idx, exceeded_clusters, + potential_alternatives end diff --git a/src/decision/dMCDA.jl b/src/decision/dMCDA.jl index 023a7f089..d11579e02 100644 --- a/src/decision/dMCDA.jl +++ b/src/decision/dMCDA.jl @@ -22,7 +22,6 @@ using DataFrames, JMcDM - # dummy functions to allow precompilation function unguided_selection() end function rank_sites!() end @@ -43,7 +42,7 @@ function mcda_methods() JMcDM.MAIRCA.MaircaMethod, JMcDM.MOORA.MooraMethod, JMcDM.PIV.PIVMethod, - JMcDM.VIKOR.VikorMethod, + JMcDM.VIKOR.VikorMethod ] end @@ -73,7 +72,7 @@ Align a vector of site rankings to match the indicated order in `s_order`. 
function align_rankings!(rankings::Array, s_order::Matrix, col::Int64)::Nothing # Fill target ranking column for (i, site_id) in enumerate(s_order[:, 1]) - rankings[rankings[:, 1].==site_id, col] .= i + rankings[rankings[:, 1] .== site_id, col] .= i end return nothing @@ -142,7 +141,9 @@ function summary_stat_env( w=0.5 )::Vector{Float64} if size(env_layer, 1) > 1 - return vec((mean(env_layer; dims=dims) .* w) .+ (std(env_layer; dims=dims) .* (1.0 - w))) + return vec( + (mean(env_layer; dims=dims) .* w) .+ (std(env_layer; dims=dims) .* (1.0 - w)) + ) end return vec(env_layer) diff --git a/src/decision/location_selection.jl b/src/decision/location_selection.jl index c712b44b9..442339373 100644 --- a/src/decision/location_selection.jl +++ b/src/decision/location_selection.jl @@ -106,7 +106,9 @@ function rank_locations( area_weighted_conn = dom.conn.data .* site_k_area(dom) conn_cache = similar(area_weighted_conn) - in_conn, out_conn, network = connectivity_strength(area_weighted_conn, sum_cover, conn_cache; out_method=eigenvector_centrality) + in_conn, out_conn, network = connectivity_strength( + area_weighted_conn, sum_cover, conn_cache; out_method=eigenvector_centrality + ) # strong_pred = strongest_source(g, network) scens = DataCube( @@ -126,19 +128,25 @@ function rank_locations( # Decisions should place more weight on environmental conditions # closer to the decision point plan_horizon = Int64(scen[At("plan_horizon")]) - decay = α .^ (1:plan_horizon+1).^2 + decay = α .^ (1:(plan_horizon + 1)) .^ 2 min_depth = scen[factors=At("depth_min")].data[1] depth_offset = scen[factors=At("depth_offset")].data[1] - depth_criteria = identify_within_depth_bounds(site_data.depth_med, min_depth, depth_offset) - valid_seed_locs = coral_habitable_locs .& depth_criteria .& (dom.site_ids .∈ Ref(target_seed_loc_ids)) + depth_criteria = identify_within_depth_bounds( + site_data.depth_med, min_depth, depth_offset + ) + valid_seed_locs = + coral_habitable_locs .& depth_criteria .& + (dom.site_ids .∈ Ref(target_seed_loc_ids)) considered_seed_locs = findall(valid_seed_locs) if count(valid_seed_locs) == 0 @warn "No valid seeding locations found for scenario $(scen_idx)" end - valid_fog_locs = coral_habitable_locs .& depth_criteria .& (dom.site_ids .∈ Ref(target_fog_loc_ids)) + valid_fog_locs = + coral_habitable_locs .& depth_criteria .& + (dom.site_ids .∈ Ref(target_fog_loc_ids)) if count(valid_fog_locs) == 0 @warn "No valid fogging locations found for scenario $(scen_idx)" end @@ -147,7 +155,10 @@ function rank_locations( leftover_space_m² = vec(leftover_space_scens[scen_idx, :]) corals = to_coral_spec(scenarios[scen_idx, :]) - area_to_seed = mean(n_corals * colony_mean_area(corals.mean_colony_diameter_m[corals.class_id.==2])) + area_to_seed = mean( + n_corals * + colony_mean_area(corals.mean_colony_diameter_m[corals.class_id .== 2]) + ) seed_pref = SeedPreferences(dom, scen) fog_pref = FogPreferences(dom, scen) @@ -204,7 +215,11 @@ function rank_locations( ) if !isempty(selected_seed_ranks) - ranks_store[locations=At(selected_seed_ranks), intervention=At(:seed), scenarios=scen_idx] .= 1:length(selected_seed_ranks) + ranks_store[ + locations=At(selected_seed_ranks), + intervention=At(:seed), + scenarios=scen_idx + ] .= 1:length(selected_seed_ranks) end end @@ -223,7 +238,11 @@ function rank_locations( fog_pref, fog_decision_mat, MCDA_approach, min_locs ) if !isempty(selected_fog_ranks) - ranks_store[locations=At(selected_fog_ranks), intervention=At(:fog), scenarios=scen_idx] .= 1:length(selected_fog_ranks) + 
ranks_store[ + locations=At(selected_fog_ranks), + intervention=At(:fog), + scenarios=scen_idx + ] .= 1:length(selected_fog_ranks) end end end @@ -282,26 +301,26 @@ Selection score """ function selection_score( ranks::YAXArray{T,3}, - iv_type::Union{Symbol,Int64}, -)::YAXArray where {T<:Union{Int64, Float32, Float64}} + iv_type::Union{Symbol,Int64} +)::YAXArray where {T<:Union{Int64,Float32,Float64}} # 1 is best rank, n_locs is worst rank, 0 are locations that were ignored # Determine the lowest rank for each scenario lowest_ranks = maximum([maximum(r) - for r in eachcol(ranks[intervention=At(iv_type)])]) + for r in eachcol(ranks[intervention=At(iv_type)])]) - return _calc_selection_score(ranks, lowest_ranks, iv_type, (:scenarios, )) + return _calc_selection_score(ranks, lowest_ranks, iv_type, (:scenarios,)) end function selection_score( - ranks::YAXArray{T, 4}, + ranks::YAXArray{T,4}, iv_type::Union{Symbol,Int64}; keep_time=false -)::YAXArray where {T<:Union{Int64, Float32, Float64}} +)::YAXArray where {T<:Union{Int64,Float32,Float64}} # [timesteps ⋅ locations ⋅ interventions ⋅ scenarios] # 1 is best rank, n_locs is worst rank, 0 values indicate locations that were ignored lowest_rank = maximum(ranks) # Determine dimensions to squash - dims = keep_time ? (:scenarios, ) : (:scenarios, :timesteps) + dims = keep_time ? (:scenarios,) : (:scenarios, :timesteps) selection_score = _calc_selection_score(ranks, lowest_rank, iv_type, dims) @@ -323,7 +342,12 @@ Note: If `timesteps` are to be squashed the scores are normalized against the ma # Returns YAXArray """ -function _calc_selection_score(ranks::YAXArray, lowest_rank::Union{Int64,Float64,Float32}, iv_type::Union{Symbol,Int64}, dims::Tuple)::YAXArray +function _calc_selection_score( + ranks::YAXArray, + lowest_rank::Union{Int64,Float64,Float32}, + iv_type::Union{Symbol,Int64}, + dims::Tuple +)::YAXArray # Subtract 1 from rank: # If the best rank is 1 and lowest is 3 (out of a single scenario): # ([lowest rank] - [rank]) / [lowest rank] @@ -340,9 +364,13 @@ function _calc_selection_score(ranks::YAXArray, lowest_rank::Union{Int64,Float64 # max((3 - 100) / 3, 0.0) # # results in 0.0 - tsliced = mapslices(x -> any(x .> 0) ? lowest_rank .- (x .- 1.0) : 0.0, ranks[intervention=At(iv_type)], dims="timesteps") + tsliced = mapslices( + x -> any(x .> 0) ? lowest_rank .- (x .- 1.0) : 0.0, + ranks[intervention=At(iv_type)]; + dims="timesteps" + ) selection_score = dropdims( - sum(tsliced, dims=dims); dims=dims + sum(tsliced; dims=dims); dims=dims ) times_ranked = size(ranks, :scenarios) @@ -384,11 +412,11 @@ function _times_selected( ranks::YAXArray{T}, iv_type::Union{Symbol,Int64}, squash::Union{Symbol,Tuple} -)::YAXArray where {T<:Union{Int64, Float32, Float64}} +)::YAXArray where {T<:Union{Int64,Float32,Float64}} s = copy(ranks[intervention=At(iv_type)]) s[s .> 0.0] .= 1.0 - return dropdims(sum(s, dims=squash); dims=squash) + return dropdims(sum(s; dims=squash); dims=squash) end """ @@ -423,18 +451,18 @@ scen_seed_freq = ADRIA.decision.selection_frequency(rs.ranks, 1) Normalized selection frequency """ function selection_frequency( - ranks::YAXArray{T, 3}, + ranks::YAXArray{T,3}, iv_type::Union{Symbol,Int64} -)::YAXArray where {T<:Union{Int64, Float32, Float64}} +)::YAXArray where {T<:Union{Int64,Float32,Float64}} # 0 is unconsidered locations, values > 0 indicate some rank was assigned. 
n_selected = _times_selected(ranks, iv_type, :scenarios) return n_selected ./ maximum(n_selected) end function selection_frequency( - ranks::YAXArray{T, 4}, + ranks::YAXArray{T,4}, iv_type::Union{Symbol,Int64} -)::YAXArray where {T<:Union{Int64, Float32, Float64}} +)::YAXArray where {T<:Union{Int64,Float32,Float64}} # 0 is unconsidered locations, values > 0 indicate some rank was assigned. n_selected = _times_selected(ranks, iv_type, (:scenarios, :timesteps)) @@ -470,9 +498,9 @@ function selection_ranks( ranks::YAXArray{T,4}, iv_type::Union{Symbol,Int64}; desc::Bool=true -)::Vector{Int64} where {T<:Union{Int64, Float32, Float64}} - sel_freq::YAXArray{T, 1} = selection_frequency(ranks, iv_type) - ranked::Vector{Int64} = sortperm(sel_freq.data, rev=desc) +)::Vector{Int64} where {T<:Union{Int64,Float32,Float64}} + sel_freq::YAXArray{T,1} = selection_frequency(ranks, iv_type) + ranked::Vector{Int64} = sortperm(sel_freq.data; rev=desc) return ranked end @@ -513,7 +541,7 @@ Summary stats of the number of deployment locations for each scenario function deployment_summary_stats( ranks::YAXArray{T,4}, iv_type::Union{Symbol,Int64} -)::YAXArray where {T<:Union{Int64, Float32, Float64}} +)::YAXArray where {T<:Union{Int64,Float32,Float64}} iv_ranks = ranks[intervention=At(iv_type)] # Min, Mean, Median, Max, stdev diff --git a/src/decision/mcda_methods.jl b/src/decision/mcda_methods.jl index feb36da40..dd4221589 100644 --- a/src/decision/mcda_methods.jl +++ b/src/decision/mcda_methods.jl @@ -12,7 +12,7 @@ Then orders sites from highest aggregate score to lowest. - aggregate score for ranking. """ function order_ranking(S::Array{Float64,2})::Array{Float64} - return sum(S, dims=2) + return sum(S; dims=2) end """ @@ -47,14 +47,14 @@ S_p = √{∑(criteria .- NIS)²} function adria_topsis(S::Array{Float64,2})::Array{Float64} # compute the set of positive ideal solutions for each criteria - PIS = maximum(S, dims=1) + PIS = maximum(S; dims=1) # compute the set of negative ideal solutions for each criteria - NIS = minimum(S, dims=1) + NIS = minimum(S; dims=1) # calculate separation distance from the ideal and non-ideal solutions - S_p = sqrt.(sum((S .- PIS) .^ 2, dims=2)) - S_n = sqrt.(sum((S .- NIS) .^ 2, dims=2)) + S_p = sqrt.(sum((S .- PIS) .^ 2; dims=2)) + S_n = sqrt.(sum((S .- NIS) .^ 2; dims=2)) # final ranking measure of relative closeness C C = S_n ./ (S_p + S_n) @@ -62,7 +62,6 @@ function adria_topsis(S::Array{Float64,2})::Array{Float64} return C end - """ adria_vikor(S; v=0.5) @@ -114,8 +113,8 @@ function adria_vikor(S::Matrix{Float64}; v::Float64=0.5)::Array{Float64} # Compute utility of the majority Sr (Manhattan distance) # Compute individual regret R (Chebyshev distance) sr_arg = maximum(S) .- S - Sr = sum(sr_arg, dims=2) - R = maximum(sr_arg, dims=2) + Sr = sum(sr_arg; dims=2) + R = maximum(sr_arg; dims=2) # Compute the VIKOR compromise Q S_h, S_s = extrema(Sr) diff --git a/src/ecosystem/Ecosystem.jl b/src/ecosystem/Ecosystem.jl index 6c146bb3c..a1d787e34 100644 --- a/src/ecosystem/Ecosystem.jl +++ b/src/ecosystem/Ecosystem.jl @@ -26,7 +26,7 @@ function EnvironmentalLayer( dist=DiscreteUniform, dist_params=(1.0, Float64(size(dhw, 3))), name="DHW Scenario", - description="DHW scenario member identifier.", + description="DHW scenario member identifier." ), Factor( 1; @@ -34,7 +34,7 @@ function EnvironmentalLayer( dist=DiscreteUniform, dist_params=(1.0, Float64(size(wave, 3))), name="Wave Scenario", - description="Wave scenario member identifier.", + description="Wave scenario member identifier." 
), Factor( 1; @@ -42,8 +42,8 @@ function EnvironmentalLayer( dist=DiscreteUniform, dist_params=(1.0, Float64(size(cyclone_mortality, 4))), name="Cyclone Mortality", - description="Cyclone mortality scenario identifier.", - ), + description="Cyclone mortality scenario identifier." + ) ) end @@ -166,7 +166,9 @@ Calculates the mean of the truncated normal distribution. # Returns The mean of the truncated normal distribution """ -function truncated_normal_mean(normal_mean::Float64, normal_stdev::Float64, lower_bound::Float64, upper_bound::Float64)::Float64 +function truncated_normal_mean( + normal_mean::Float64, normal_stdev::Float64, lower_bound::Float64, upper_bound::Float64 +)::Float64 alpha::Float64 = (lower_bound - normal_mean) / normal_stdev beta::Float64 = (upper_bound - normal_mean) / normal_stdev @@ -224,7 +226,6 @@ function truncated_normal_cdf( # Store error function of alpha to avoid duplicate calculations erf_alpha = rational_erf(alpha * StatsFuns.invsqrt2) - return (rational_erf(zeta * StatsFuns.invsqrt2) - erf_alpha) / (rational_erf(beta * StatsFuns.invsqrt2) - erf_alpha) end diff --git a/src/ecosystem/connectivity.jl b/src/ecosystem/connectivity.jl index d48b7cd39..de2be7a51 100644 --- a/src/ecosystem/connectivity.jl +++ b/src/ecosystem/connectivity.jl @@ -36,7 +36,7 @@ function site_connectivity( loc_ids::Vector{String}; conn_cutoff::Float64=1e-6, agg_func::Function=mean, - swap::Bool=false, + swap::Bool=false )::NamedTuple if !isdir(file_path) && !isfile(file_path) error("Could not find location: $(file_path)") @@ -56,9 +56,9 @@ function site_connectivity( years::Vector{String} = unique(getindex.(split.(conn_fns, "_"), 2)) # Organize files by their connectivity years - year_conn_fns = NamedTuple{Tuple(Symbol.("year_".*years))}( - [filter(x -> occursin(yr, x), joinpath.(file_path, conn_fns)) for yr in years] - ) + year_conn_fns = NamedTuple{Tuple(Symbol.("year_" .* years))}( + [filter(x -> occursin(yr, x), joinpath.(file_path, conn_fns)) for yr in years] +) # Create store for each year tmp_store::Vector{Matrix{Float64}} = Matrix{Float64}[] @@ -73,7 +73,7 @@ function site_connectivity( missingstring="NA", transpose=swap, types=Float64, - drop=[1], + drop=[1] ) ) for fn in assoc_files ] @@ -93,7 +93,7 @@ function site_connectivity( missingstring="NA", transpose=swap, types=Float64, - drop=[1], + drop=[1] ) conn_loc_ids::Vector{String} = names(conn_file1) @@ -141,7 +141,7 @@ function site_connectivity( unique_site_ids::Vector{Union{Missing,String}}; con_cutoff::Float64=1e-6, agg_func::Function=mean, - swap::Bool=false, + swap::Bool=false )::NamedTuple # Remove any row marked as missing diff --git a/src/ecosystem/corals/CoralGrowth.jl b/src/ecosystem/corals/CoralGrowth.jl index 3473a894a..a05815c99 100644 --- a/src/ecosystem/corals/CoralGrowth.jl +++ b/src/ecosystem/corals/CoralGrowth.jl @@ -12,7 +12,6 @@ struct CoralGrowth{A<:Integer,T<:NamedTuple} ode_p::T end - """ CoralGrowth(n_sites) @@ -35,15 +34,15 @@ function CoralGrowth(n_locs::Int64, n_groups::Int64, n_sizes::Int64)::CoralGrowt # TODO: The named tuple was for use with ODE solvers, which is now unneeded. # These caches should all be moved into the `CoralGrowth` struct. 
p = @NamedTuple{ - small::StaticArrays.SVector{5,Int64}, # indices for small size classes - mid::StaticArrays.SVector{25,Int64}, # indices for mid-size corals - large::StaticArrays.SVector{5,Int64}, # indices for large corals - rec::Matrix{Float64}, # recruitment values, where `s` relates to available space (not max carrying capacity) - sXr::Array{Float64, 3}, # s * X * r - X_mb::Array{Float64, 3}, # X * mb - r::Matrix{Float64}, # growth rate - mb::Matrix{Float64} # background mortality - }(( # cache matrix to hold X (current coral cover) + small::StaticArrays.SVector{5,Int64}, # indices for small size classes + mid::StaticArrays.SVector{25,Int64}, # indices for mid-size corals + large::StaticArrays.SVector{5,Int64}, # indices for large corals + rec::Matrix{Float64}, # recruitment values, where `s` relates to available space (not max carrying capacity) + sXr::Array{Float64,3}, # s * X * r + X_mb::Array{Float64,3}, # X * mb + r::Matrix{Float64}, # growth rate + mb::Matrix{Float64} # background mortality + }(( # cache matrix to hold X (current coral cover) # Cached indices small, mid, large, diff --git a/src/ecosystem/corals/Corals.jl b/src/ecosystem/corals/Corals.jl index 22a6fa732..aed29b30d 100644 --- a/src/ecosystem/corals/Corals.jl +++ b/src/ecosystem/corals/Corals.jl @@ -1,11 +1,9 @@ using DataFrames import ModelParameters: Model - # Upper bound offset to use when re-creating critical DHW distributions const HEAT_UB = 10.0 - """ functional_group_names()::Vector{Symbol} @@ -22,7 +20,6 @@ function functional_group_names()::Vector{Symbol} ] end - """ colony_mean_area(colony_diam_means::Array{T})::Array{T} where {T<:Real} @@ -58,13 +55,15 @@ Helper function defining coral colony diameter bin edges. The values are convert to the desired unit. The default unit is `m`. """ function bin_edges(; unit=:m) - return Matrix([ - 0.0 5.0 7.5 10.0 20.0 40.0 100.0 150.0; - 0.0 5.0 7.5 10.0 20.0 35.0 50.0 100.0; - 0.0 5.0 7.5 10.0 15.0 20.0 40.0 50.0; - 0.0 5.0 7.5 10.0 20.0 40.0 50.0 100.0; - 0.0 5.0 7.5 10.0 20.0 40.0 50.0 100.0 - ]) .* linear_scale(:cm, unit) + return Matrix( + [ + 0.0 5.0 7.5 10.0 20.0 40.0 100.0 150.0; + 0.0 5.0 7.5 10.0 20.0 35.0 50.0 100.0; + 0.0 5.0 7.5 10.0 15.0 20.0 40.0 50.0; + 0.0 5.0 7.5 10.0 20.0 40.0 50.0 100.0; + 0.0 5.0 7.5 10.0 20.0 40.0 50.0 100.0 + ] + ) .* linear_scale(:cm, unit) end # function bin_edges() # return Matrix([ @@ -82,7 +81,7 @@ end Helper function defining coral colony diameter bin widths. """ function bin_widths() - return bin_edges()[:, 2:end] .- bin_edges()[:, 1:end-1] + return bin_edges()[:, 2:end] .- bin_edges()[:, 1:(end - 1)] end """ @@ -125,7 +124,8 @@ function colony_areas() edges = bin_edges(; unit=:cm) # Diameters in cm - mean_cm_diameters = edges[:, 1:end-1] + (edges[:, 2:end] - edges[:, 1:end-1]) / 2.0 + mean_cm_diameters = + edges[:, 1:(end - 1)] + (edges[:, 2:end] - edges[:, 1:(end - 1)]) / 2.0 # To convert to cover we locate bin means and calculate bin mean areas colony_area_mean_cm2 = colony_mean_area(mean_cm_diameters) @@ -137,12 +137,11 @@ function bins_bounds(mean_diam::Matrix{Float64})::Matrix{Float64} bins::Matrix{Float64} = zeros(size(mean_diam)...) bins[:, 1] .= mean_diam[:, 1] .* 2 for i in 2:(size(mean_diam)[2]) - bins[:, i] .= (mean_diam[:, i] .- bins[:, i-1]) .* 2 .+ bins[:, i-1] + bins[:, i] .= (mean_diam[:, i] .- bins[:, i - 1]) .* 2 .+ bins[:, i - 1] end return bins end - """ coral_spec() @@ -234,7 +233,9 @@ function coral_spec()::NamedTuple # coral sizes are evenly distributed within each bin. 
# Second, growth as transitions of cover to higher bins is estimated as # rate of growth per year. - params.growth_rate .= reshape(growth_rate(_linear_extensions, bin_widths()), n_groups_and_sizes)[:] + params.growth_rate .= reshape( + growth_rate(_linear_extensions, bin_widths()), n_groups_and_sizes + )[:] # Scope for fecundity as a function of colony area (Hall and Hughes 1996) # Corymbose non-acropora uses the Stylophora data from Hall and Hughes with interpolation @@ -248,7 +249,7 @@ function coral_spec()::NamedTuple fec = exp.(log.(fec_par_a) .+ fec_par_b .* log.(colony_area_cm2)) ./ 0.1 # Colonies with area (in cm2) below indicated size are not fecund (reproductive) - fec[colony_area_cm2. 1.0) exceeded::BitVector = vec(loc_cover_cache .> 1.0) msg = """ @@ -68,7 +68,7 @@ function proportional_adjustment!( coral_cover::Union{SubArray{T,3},Array{T,3}}, loc_cover_cache::Vector{T} )::Nothing where {T<:Float64} loc_cover_cache .= vec(sum(coral_cover; dims=(1, 2))) - loc_cover_cache[loc_cover_cache.≈1.0] .= 1.0 + loc_cover_cache[loc_cover_cache .≈ 1.0] .= 1.0 if any(loc_cover_cache .> 1.0) exceeded::Vector{Int64} = findall(vec(loc_cover_cache .> 1.0)) msg = """ @@ -128,7 +128,7 @@ function growthODE(du::Matrix{Float64}, X::Matrix{Float64}, p::NamedTuple, t::Re # sXr : available space (sigma) * current cover (X) * growth rate (r) # X_mb : current cover (X) * background mortality (mb) p.X_mb .= X .* p.mb - p.sXr .= (max.(1.0 .- sum(X, dims=1), 0.0) .* X .* p.r) + p.sXr .= (max.(1.0 .- sum(X; dims=1), 0.0) .* X .* p.r) # For each size class, we determine the corals coming into size class due to growth, # and subtract those leaving the size class due to growth and background mortality @@ -140,8 +140,11 @@ function growthODE(du::Matrix{Float64}, X::Matrix{Float64}, p::NamedTuple, t::Re # # The smallest size class only has corals leaving the size class. @views @. du[p.small, :] = -(p.sXr[p.small, :] + p.X_mb[p.small, :]) - @views @. du[p.mid, :] = (p.sXr[p.mid-1, :] - p.X_mb[p.mid-1, :]) - (p.sXr[p.mid, :] + p.X_mb[p.mid, :]) - @views @. du[p.large, :] = (p.sXr[p.large-1, :] - p.X_mb[p.large-1, :]) + (p.sXr[p.large, :] - p.X_mb[p.large, :]) + @views @. du[p.mid, :] = + (p.sXr[p.mid - 1, :] - p.X_mb[p.mid - 1, :]) - (p.sXr[p.mid, :] + p.X_mb[p.mid, :]) + @views @. du[p.large, :] = + (p.sXr[p.large - 1, :] - p.X_mb[p.large - 1, :]) + + (p.sXr[p.large, :] - p.X_mb[p.large, :]) return nothing end @@ -232,7 +235,8 @@ function bleaching_mortality!(Y::AbstractArray{Float64,2}, # The model is modified to incorporate adaptation effect but maximum # reduction is to capped to 0. - @. capped_dhw = min.(ℯ^(0.17 + 0.35 * max(0.0, dhw' - (a_adapt + (tstep * n_adapt)))) - 1.0, 100.0) + @. capped_dhw = + min.(ℯ^(0.17 + 0.35 * max(0.0, dhw' - (a_adapt + (tstep * n_adapt)))) - 1.0, 100.0) @. 
depth_coeff = ℯ^(-0.07551 * (depth - 2.0)) # Estimate long-term bleaching mortality with an estimated depth coefficient and @@ -378,12 +382,12 @@ function bleaching_mortality!(cover::Matrix{Float64}, dhw::Vector{Float64}, return nothing end function bleaching_mortality!( - cover::AbstractArray{Float64, 3}, + cover::AbstractArray{Float64,3}, dhw::Vector{Float64}, depth_coeff::Vector{Float64}, stdev::AbstractMatrix{Float64}, - dist_t_1::AbstractArray{Float64, 3}, - dist_t::AbstractArray{Float64, 3}, + dist_t_1::AbstractArray{Float64,3}, + dist_t::AbstractArray{Float64,3}, prop_mort::SubArray{Float64} )::Nothing n_groups, n_sizes, n_locs = size(cover) @@ -492,14 +496,15 @@ function _shift_distributions!( # (values for size class 1 gets replaced by recruitment process) for i in length(growth_rate):-1:2 # Skip size class if nothing is moving up - sum(@view(cover[i-1:i])) == 0.0 ? continue : false + sum(@view(cover[(i - 1):i])) == 0.0 ? continue : false - prop_growth = @views (cover[i-1:i] ./ sum(cover[i-1:i])) .* (growth_rate[i-1:i] ./ sum(growth_rate[i-1:i])) + prop_growth = @views (cover[(i - 1):i] ./ sum(cover[(i - 1):i])) .* + (growth_rate[(i - 1):i] ./ sum(growth_rate[(i - 1):i])) if sum(prop_growth) == 0.0 continue end - dist_t[i] = sum(@view(dist_t[i-1:i]), Weights(prop_growth ./ sum(prop_growth))) + dist_t[i] = sum(@view(dist_t[(i - 1):i]), Weights(prop_growth ./ sum(prop_growth))) end return nothing @@ -540,15 +545,17 @@ function adjust_DHW_distribution!( # Combine distributions using a MixtureModel for all non-juvenile size # classes (we pass in all relevant size classes for the functional group here). - @views _shift_distributions!(cover[1, sc1:sc_end, loc], growth_rate[grp, :], dist_t[sc1:sc_end, loc]) + @views _shift_distributions!( + cover[1, sc1:sc_end, loc], growth_rate[grp, :], dist_t[sc1:sc_end, loc] + ) end end return nothing end function adjust_DHW_distribution!( - cover_t_1::SubArray{T, 3}, - dist_t::AbstractArray{T, 3}, + cover_t_1::SubArray{T,3}, + dist_t::AbstractArray{T,3}, growth_rate::Matrix{T} )::Nothing where {T<:Float64} groups, _, locs = axes(cover_t_1) @@ -595,7 +602,7 @@ function settler_DHW_tolerance!( settlers::Matrix{F}, fec_params_per_m²::Vector{F}, h²::F, - n_sizes::Int64, + n_sizes::Int64 )::Nothing where {F<:Float64} # Potential sink locations (TODO: pass in later) sink_loc_ids::Vector{Int64} = findall(k_area .> 0.0) @@ -615,24 +622,28 @@ function settler_DHW_tolerance!( # Calculate contribution to cover to determine weights for each species/group w = @views settlers[:, sink_loc]' .* tp[source_locs, sink_loc].data - w_per_group = w ./ sum(w, dims=1) + w_per_group = w ./ sum(w; dims=1) replace!(w_per_group, NaN => 0.0) # Determine new distribution mean for each species at all locations for (sp, sc1) in enumerate(settler_sc) - sc1_end::UnitRange{Int64} = sc1:sc1+(n_sizes-1) + sc1_end::UnitRange{Int64} = sc1:(sc1 + (n_sizes - 1)) # Get distribution mean of reproductive size classes at source locations # recalling that source locations may include the sink location due to # self-seeding. reproductive_sc .= @view(fec_params_per_m²[sc1_end]) .> 0.0 - settler_means::SubArray{Float64} = @view(c_mean_t_1[sc1_end[reproductive_sc], source_locs]) + settler_means::SubArray{Float64} = @view( + c_mean_t_1[sc1_end[reproductive_sc], source_locs] + ) # Determine weights based on contribution to recruitment. # This weights the recruited corals by the size classes and source locations # which contributed to recruitment. 
if sum(w_per_group[:, sp]) > 0.0 - ew::Vector{Float64} = repeat(w_per_group[:, sp], inner=count(reproductive_sc)) + ew::Vector{Float64} = repeat( + w_per_group[:, sp]; inner=count(reproductive_sc) + ) # Determine combined mean # https://en.wikipedia.org/wiki/Mixture_distribution#Properties @@ -647,8 +658,8 @@ function settler_DHW_tolerance!( return nothing end function settler_DHW_tolerance!( - c_mean_t_1::AbstractArray{F, 3}, - c_mean_t::AbstractArray{F, 3}, + c_mean_t_1::AbstractArray{F,3}, + c_mean_t::AbstractArray{F,3}, k_area::Vector{F}, tp::AbstractMatrix{F}, settlers::AbstractMatrix{F}, @@ -673,7 +684,7 @@ function settler_DHW_tolerance!( # Calculate contribution to cover to determine weights for each species/group w = @views settlers[:, sink_loc]' .* tp.data[source_locs, sink_loc] - w_per_group = w ./ sum(w, dims=1) + w_per_group = w ./ sum(w; dims=1) replace!(w_per_group, NaN => 0.0) for grp in groups @@ -681,20 +692,26 @@ function settler_DHW_tolerance!( # recalling that source locations may include the sink location due to # self-seeding. reproductive_sc .= @view(fec_params_per_m²[grp, :]) .> 0.0 - settler_means::SubArray{Float64} = @view(c_mean_t_1[grp, reproductive_sc, source_locs]) + settler_means::SubArray{Float64} = @view( + c_mean_t_1[grp, reproductive_sc, source_locs] + ) # Determine weights based on contribution to recruitment. # This weights the recruited corals by the size classes and source locations # which contributed to recruitment. if sum(w_per_group[:, grp]) > 0.0 - ew::Vector{Float64} = repeat(w_per_group[:, grp], inner=count(reproductive_sc)) + ew::Vector{Float64} = repeat( + w_per_group[:, grp]; inner=count(reproductive_sc) + ) # Determine combined mean # https://en.wikipedia.org/wiki/Mixture_distribution#Properties recruit_μ::Float64 = sum(settler_means, Weights(ew ./ sum(ew))) # Mean for generation t is determined through Breeder's equation - c_mean_t[grp, 1, sink_loc] = breeders(c_mean_t_1[grp, 1, sink_loc], recruit_μ, h²) + c_mean_t[grp, 1, sink_loc] = breeders( + c_mean_t_1[grp, 1, sink_loc], recruit_μ, h² + ) end end end @@ -739,15 +756,17 @@ function fecundity_scope!( n_classes::Int64 = Int64(n_group_and_size / n_groups) fec_all .= fec_params .* C_cover_t .* site_area - for (i, (s, e)) in enumerate(zip(1:n_classes:n_group_and_size, n_classes:n_classes:n_group_and_size+1)) - @views fec_groups[i, :] .= vec(sum(fec_all[s:e, :], dims=1)) + for (i, (s, e)) in enumerate( + zip(1:n_classes:n_group_and_size, n_classes:n_classes:(n_group_and_size + 1)) + ) + @views fec_groups[i, :] .= vec(sum(fec_all[s:e, :]; dims=1)) end return nothing end function fecundity_scope!( fec_groups::AbstractMatrix{T}, - fec_all::AbstractArray{T, 3}, + fec_all::AbstractArray{T,3}, fec_params::AbstractMatrix{T}, C_cover_t::AbstractArray{T,3}, site_area::AbstractMatrix{T} @@ -756,7 +775,7 @@ function fecundity_scope!( # Dimensions of fec all are [groups ⋅ sizes ⋅ locations] fec_all .= fec_params .* C_cover_t .* reshape(site_area, (1, size(site_area)...)) # Sum over size classes - @views fec_groups[:, :] .= dropdims(sum(fec_all, dims=2), dims=2) + @views fec_groups[:, :] .= dropdims(sum(fec_all; dims=2); dims=2) return nothing end @@ -787,7 +806,8 @@ of 0.9 inside sf(i, j) indicates that species i at site j can only produce `sf` : Array of values ∈ [0,1] indicating reduced fecundity from a baseline. 
""" function stressed_fecundity(tstep::Int64, a_adapt::Vector{T}, n_adapt::T, - stresspast::Vector{T}, LPdhwcoeff::T, DHWmaxtot::T, LPDprm2::T, n_groups::Int64)::Matrix{T} where {T<:Float64} + stresspast::Vector{T}, LPdhwcoeff::T, DHWmaxtot::T, LPDprm2::T, n_groups::Int64 +)::Matrix{T} where {T<:Float64} ad::Vector{Float64} = @. a_adapt + tstep * n_adapt # using half of DHWmaxtot as a placeholder @@ -797,12 +817,13 @@ function stressed_fecundity(tstep::Int64, a_adapt::Vector{T}, n_adapt::T, # One way around dimensional issue - tmp_ad for each class as the averaged # of the enhanced and unenhanced corals in that class # KA note: this works as it averages over size classes and not across groups. - tmp_ad2::Vector{Float64} = vec(mean(reshape(tmp_ad, Int64(length(tmp_ad) / n_groups), n_groups), dims=1)) + tmp_ad2::Vector{Float64} = vec( + mean(reshape(tmp_ad, Int64(length(tmp_ad) / n_groups), n_groups); dims=1) + ) return 1.0 .- exp.(.-(exp.(-LPdhwcoeff .* (stresspast' .* tmp_ad2 .- LPDprm2)))) end - """ settler_density(α, β, L) @@ -842,7 +863,6 @@ function settler_density(α::T, β::T, L::T)::Float64 where {T<:Float64} return (α .* L) ./ (β .+ L) end - """ recruitment_rate(larval_pool::AbstractArray{T,2}, A::AbstractArray{T}; α=2.5, β=5000.0) @@ -859,14 +879,12 @@ Calculates coral recruitment for each species/group and location. """ function recruitment_rate(larval_pool::AbstractArray{T,2}, A::AbstractArray{T}; α::Union{T,Vector{T}}=2.5, β::Union{T,Vector{T}}=5000.0)::Matrix{T} where {T<:Float64} - sd = settler_density.(α, β, larval_pool) .* A' - @views sd[sd.>0.0] .= rand.(Poisson.(sd[sd.>0.0])) + @views sd[sd .> 0.0] .= rand.(Poisson.(sd[sd .> 0.0])) return sd end - """ settler_cover(fec_scope::T, conn::AbstractMatrix{Float64}, leftover_space::T, α::V, β::V, basal_area_per_settler::V, potential_settlers::T)::T where {T<:Matrix{Float64},V<:Vector{Float64}} @@ -898,8 +916,8 @@ function settler_cover( )::T where {T<:Matrix{Float64},V<:Vector{Float64}} # Determine active sources and sinks - valid_sources::BitVector = vec(sum(conn, dims=2) .> 0.0) - valid_sinks::BitVector = vec(sum(conn, dims=1) .> 0.0) + valid_sources::BitVector = vec(sum(conn; dims=2) .> 0.0) + valid_sinks::BitVector = vec(sum(conn; dims=1) .> 0.0) # Send larvae out into the world (reuse potential_settlers to reduce allocations) # Note, conn rows need not sum to 1.0 as this missing probability accounts for larvae @@ -915,5 +933,6 @@ function settler_cover( # Larvae have landed, work out how many are recruited # Determine area covered by recruited larvae (settler cover) per m^2 # recruits per m^2 per site multiplied by area per settler - return recruitment_rate(potential_settlers, leftover_space; α=α, β=β) .* basal_area_per_settler + return recruitment_rate(potential_settlers, leftover_space; α=α, β=β) .* + basal_area_per_settler end diff --git a/src/ecosystem/corals/growth_expanded.jl b/src/ecosystem/corals/growth_expanded.jl index e873ae72d..41e28a0ee 100644 --- a/src/ecosystem/corals/growth_expanded.jl +++ b/src/ecosystem/corals/growth_expanded.jl @@ -10,11 +10,13 @@ X : Current coral cover, relative to `k` p : additional parameters t : time, unused, so marking with `_` """ -function growthODE_expanded(du::Array{Float64,2}, X::Array{Float64,2}, p::NamedTuple, _::Real)::Nothing +function growthODE_expanded( + du::Array{Float64,2}, X::Array{Float64,2}, p::NamedTuple, _::Real +)::Nothing # `s` refers to sigma holding leftover space for each site in form of: 1 x n_sites s = p.sigma[:, :] - s .= max.(p.k' .- sum(X, dims=1), 0.0) # 
Make relative to k (max. carrying capacity) s = vec(s) rec = @view p.rec[:, :] # recruitment values @@ -35,7 +37,7 @@ function growthODE_expanded(du::Array{Float64,2}, X::Array{Float64,2}, p::NamedT du[9, :] .= (s .* X[8, :] .* r[8]) .- (s .* X[9, :] .* r[9]) .- X_mb[9, :] du[10, :] .= (s .* X[9, :] .* r[9]) .- (s .* X[10, :] .* r[10]) .- X_mb[10, :] du[11, :] .= (s .* X[10, :] .* r[10]) .- (s .* X[11, :] .* r[11]) .- X_mb[11, :] - du[12, :] .= (s .* X[11, :] .* r[11]).+ (s .* X[11, :] * r[11]) .- X_mb[12, :] + du[12, :] .= (s .* X[11, :] .* r[11]) .+ (s .* X[11, :] * r[11]) .- X_mb[12, :] # Corymbose Acropora du[13, :] .= rec[3, :] .- s .* X[13, :] .* r[13] .- X_mb[13, :] @@ -72,5 +74,5 @@ function growthODE_expanded(du::Array{Float64,2}, X::Array{Float64,2}, p::NamedT # Ensure no negative values du .= max.(du, 0.0) - return + return nothing end diff --git a/src/factors/Factors.jl b/src/factors/Factors.jl index 7833dec63..9f37306c7 100644 --- a/src/factors/Factors.jl +++ b/src/factors/Factors.jl @@ -33,19 +33,19 @@ function Factor(val; kwargs...)::Param return Param((; val=val, nt...)) end -function _set_factor_defaults(kwargs::NT) where {NT <: NamedTuple} +function _set_factor_defaults(kwargs::NT) where {NT<:NamedTuple} missing_defaults = (; default_dist_params=kwargs.dist_params) for k in keys(missing_defaults) if !haskey(kwargs, k) - kwargs = (; kwargs..., k=>missing_defaults[k]) + kwargs = (; kwargs..., k => missing_defaults[k]) end end return kwargs end -function _check_has_required_info(kwargs::NT) where {NT <: NamedTuple} +function _check_has_required_info(kwargs::NT) where {NT<:NamedTuple} @assert haskey(kwargs, :ptype) "Missing factor field `ptype`" @assert haskey(kwargs, :dist) "Missing factor field `dist`" @assert haskey(kwargs, :dist_params) "Missing factor field `dist_params`" @@ -53,7 +53,8 @@ function _check_has_required_info(kwargs::NT) where {NT <: NamedTuple} @assert haskey(kwargs, :description) "Missing factor field `description`" param_dist_types = [ - "continuous", "ordered categorical", "unordered categorical", "ordered discrete", "discrete" + "continuous", "ordered categorical", "unordered categorical", "ordered discrete", + "discrete" ] @assert any(occursin.(kwargs[:ptype], param_dist_types)) "`ptype` field is not one of $(param_dist_types)" end @@ -116,10 +117,10 @@ function DiscreteTriangularDist( lb::T, ub::T, peak::T -)::DiscreteNonParametric where {T<:Union{Int64, Float64}} +)::DiscreteNonParametric where {T<:Union{Int64,Float64}} # The lower bound will always resolve to 0 probability # so we extend the lower bound to capture the edge. - _lb, ub, peak = trunc.(Int64, [lb-1, ub, peak]) + _lb, ub, peak = trunc.(Int64, [lb - 1, ub, peak]) dist = TriangularDist(_lb, ub, peak) # Approximate discrete probabilities using extended lower bound. 
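For readers tracing the `DiscreteTriangularDist` hunks: the "approximate discrete probabilities" step refers to differencing the triangular CDF over unit intervals, with the lower bound extended by one (the `lb - 1` above) so the nominal lower bound keeps non-zero mass. A minimal standalone sketch of that idea, assuming only the documented Distributions.jl API (`TriangularDist`, `cdf`, `DiscreteNonParametric`); the helper name is illustrative, not ADRIA's:

using Distributions

# Sketch only: discretise a triangular distribution via CDF differences.
# Mass at integer x is P(x - 1 < X <= x); extending the lower bound by one
# mirrors the `lb - 1` adjustment so `lb` itself receives non-zero mass.
function discrete_triangular_sketch(lb::Int64, ub::Int64, peak::Int64)
    dist = TriangularDist(lb - 1, ub, peak)  # requires lb - 1 <= peak <= ub
    support = collect(lb:ub)
    w = [cdf(dist, x) - cdf(dist, x - 1) for x in support]
    return DiscreteNonParametric(support, w ./ sum(w))
end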
@@ -159,5 +160,5 @@ function DiscreteOrderedUniformDist( options = lb:step:ub n_opts = length(options) - return DiscreteNonParametric(options, fill(1.0/n_opts, n_opts)) + return DiscreteNonParametric(options, fill(1.0 / n_opts, n_opts)) end diff --git a/src/interventions/Interventions.jl b/src/interventions/Interventions.jl index 3156c39c2..4db6c8aa7 100644 --- a/src/interventions/Interventions.jl +++ b/src/interventions/Interventions.jl @@ -7,7 +7,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteUniform, dist_params=(-1.0, Float64(length(decision.mcda_methods()))), name="Guided", - description="Choice of MCDA approach.", + description="Choice of MCDA approach." ) N_seed_TA::Param = Factor( 0; @@ -15,7 +15,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteOrderedUniformDist, dist_params=(0.0, 1000000.0, 50000.0), # increase in steps of 50K name="Seeded Tabular Acropora", - description="Number of Tabular Acropora to seed per deployment year.", + description="Number of Tabular Acropora to seed per deployment year." ) N_seed_CA::Param = Factor( 0; @@ -23,7 +23,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteOrderedUniformDist, dist_params=(0.0, 1000000.0, 50000.0), # increase in steps of 50K name="Seeded Corymbose Acropora", - description="Number of Corymbose Acropora to seed per deployment year.", + description="Number of Corymbose Acropora to seed per deployment year." ) N_seed_SM::Param = Factor( 0; @@ -31,7 +31,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteOrderedUniformDist, dist_params=(0.0, 1000000.0, 50000.0), # increase in steps of 50K name="Seeded Small Massives", - description="Number of small massives/encrusting to seed per deployment year.", + description="Number of small massives/encrusting to seed per deployment year." ) min_iv_locations::Param = Factor( 5; @@ -55,7 +55,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=TriangularDist, dist_params=(0.0, 0.3, 0.0), name="Fogging", - description="Assumed reduction in bleaching mortality.", + description="Assumed reduction in bleaching mortality." ) SRM::Param = Factor( 0.0; @@ -63,7 +63,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=TriangularDist, dist_params=(0.0, 7.0, 0.0), name="SRM", - description="Reduction in DHWs due to shading.", + description="Reduction in DHWs due to shading." ) a_adapt::Param = Factor( 0.0; @@ -71,7 +71,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteOrderedUniformDist, dist_params=(0.0, 15.0, 0.5), # increase in steps of 0.5 DHW enhancement name="Assisted Adaptation", - description="Assisted adaptation in terms of DHW resistance.", + description="Assisted adaptation in terms of DHW resistance." ) seed_years::Param = Factor( 10; @@ -79,7 +79,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteTriangularDist, dist_params=(5.0, 75.0, 5.0), name="Years to Seed", - description="Number of years to seed for.", + description="Number of years to seed for." ) shade_years::Param = Factor( 10; @@ -87,7 +87,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteTriangularDist, dist_params=(5.0, 75.0, 5.0), name="Years to Shade", - description="Number of years to shade for.", + description="Number of years to shade for." ) fog_years::Param = Factor( 10; @@ -95,7 +95,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteTriangularDist, dist_params=(5.0, 75.0, 5.0), name="Years to fog", - description="Number of years to fog for.", + description="Number of years to fog for." 
) plan_horizon::Param = Factor( 5; @@ -103,7 +103,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteUniform, dist_params=(0.0, 20.0), name="Planning Horizon", - description="How many years of projected data to take into account when selecting intervention locations (0 only accounts for current deployment year).", + description="How many years of projected data to take into account when selecting intervention locations (0 only accounts for current deployment year)." ) seed_deployment_freq::Param = Factor( 5; @@ -111,7 +111,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteUniform, dist_params=(0.0, 15.0), name="Selection Frequency (Seed)", - description="Frequency of seeding deployments (0 deploys once).", + description="Frequency of seeding deployments (0 deploys once)." ) fog_deployment_freq::Param = Factor( 5; @@ -119,7 +119,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteUniform, dist_params=(0.0, 15.0), name="Selection Frequency (Fog)", - description="Frequency of fogging deployments (0 deploys once).", + description="Frequency of fogging deployments (0 deploys once)." ) shade_deployment_freq::Param = Factor( 1; @@ -127,7 +127,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteUniform, dist_params=(1.0, 15.0), name="Deployment Frequency (Shading)", - description="Frequency of shading deployments.", + description="Frequency of shading deployments." ) seed_year_start::Param = Factor( 2; @@ -135,7 +135,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteUniform, dist_params=(0.0, 25.0), name="Seeding Start Year", - description="Start seeding deployments after this number of years has elapsed.", + description="Start seeding deployments after this number of years has elapsed." ) shade_year_start::Param = Factor( 2; @@ -143,7 +143,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteUniform, dist_params=(2.0, 25.0), name="Shading Start Year", - description="Start of shading deployments after this number of years has elapsed.", + description="Start of shading deployments after this number of years has elapsed." ) fog_year_start::Param = Factor( 2; @@ -151,7 +151,7 @@ Base.@kwdef struct Intervention <: EcoModel dist=DiscreteUniform, dist_params=(2.0, 25.0), name="Fogging Start Year", - description="Start of fogging deployments after this number of years has elapsed.", + description="Start of fogging deployments after this number of years has elapsed." 
) end diff --git a/src/interventions/seeding.jl b/src/interventions/seeding.jl index 2e22afac9..6b6c730cc 100644 --- a/src/interventions/seeding.jl +++ b/src/interventions/seeding.jl @@ -31,7 +31,11 @@ function distribute_seeded_corals( scaled_seed = ((prop_area_avail .* seeded_area.data') ./ seed_loc_k_m²)' #scaled_seed = ((prop_area_avail .* seeded_area') ./ seed_loc_k_m²)' - return DataCube(scaled_seed, taxa=caxes(seeded_area)[1].val.data, locations=1:length(available_space)) + return DataCube( + scaled_seed; + taxa=caxes(seeded_area)[1].val.data, + locations=1:length(available_space) + ) end """ @@ -64,7 +68,7 @@ function seed_corals!( a_adapt::V, Yseed::SubArray, stdev::V, - c_dist_t::Matrix{Float64}, + c_dist_t::Matrix{Float64} )::Nothing where {V<:Vector{Float64}} # Selected locations can fill up over time so avoid locations with no space seed_locs = seed_locs[findall(leftover_space_m²[seed_locs] .> 0.0)] @@ -73,7 +77,7 @@ function seed_corals!( scaled_seed = distribute_seeded_corals( loc_k_area[seed_locs], leftover_space_m²[seed_locs], - seeded_area, + seeded_area ) # Seed each location and log @@ -96,9 +100,10 @@ function seed_corals!( # Truncated normal distributions for deployed corals # Assume same stdev and bounds as original - tn::Vector{Float64} = truncated_normal_mean.( - a_adapt[seed_sc], stdev[seed_sc], 0.0, a_adapt[seed_sc] .+ HEAT_UB, - ) + tn::Vector{Float64} = + truncated_normal_mean.( + a_adapt[seed_sc], stdev[seed_sc], 0.0, a_adapt[seed_sc] .+ HEAT_UB + ) # If seeding an empty location, no need to do any further calculations if all(isapprox.(w_taxa[:, i], 1.0)) @@ -116,7 +121,7 @@ function seed_corals!( return nothing end function seed_corals!( - cover::AbstractArray{Float64, 3}, + cover::AbstractArray{Float64,3}, loc_k_area::Vector{T}, leftover_space_m²::Vector{T}, seed_locs::Vector{Int64}, @@ -125,7 +130,7 @@ function seed_corals!( a_adapt::Matrix{T}, Yseed::SubArray, stdev::Matrix{T}, - c_dist_t::Array{Float64, 3}, + c_dist_t::Array{Float64,3} )::Nothing where {T<:Float64} # Selected locations can fill up over time so avoid locations with no space seed_locs = seed_locs[findall(leftover_space_m²[seed_locs] .> 0.0)] @@ -135,7 +140,7 @@ function seed_corals!( scaled_seed = distribute_seeded_corals( loc_k_area[seed_locs], leftover_space_m²[seed_locs], - seeded_area, + seeded_area ) # Seed each location and log @@ -158,9 +163,10 @@ function seed_corals!( # Truncated normal distributions for deployed corals # Assume same stdev and bounds as original - tn::Vector{Float64} = truncated_normal_mean.( - a_adapt[seed_sc], stdev[seed_sc], 0.0, a_adapt[seed_sc] .+ HEAT_UB, - ) + tn::Vector{Float64} = + truncated_normal_mean.( + a_adapt[seed_sc], stdev[seed_sc], 0.0, a_adapt[seed_sc] .+ HEAT_UB + ) # If seeding an empty location, no need to do any further calculations if all(isapprox.(w_taxa[:, i], 1.0)) diff --git a/src/io/ResultSet.jl b/src/io/ResultSet.jl index 3192b36a7..0c727991b 100644 --- a/src/io/ResultSet.jl +++ b/src/io/ResultSet.jl @@ -55,7 +55,7 @@ function ResultSet( wave_stats_set::Dict, conn_data::Dict, site_data::DataFrame, - model_spec::DataFrame, + model_spec::DataFrame )::ResultSet rcp = "RCP" in keys(input_set.attrs) ? 
input_set.attrs["RCP"] : input_set.attrs["rcp"] return ADRIAResultSet(input_set.attrs["name"], @@ -79,13 +79,17 @@ function ResultSet( DataCube(log_set["seed"], Symbol.(Tuple(log_set["seed"].attrs["structure"]))), DataCube(log_set["fog"], Symbol.(Tuple(log_set["fog"].attrs["structure"]))), DataCube(log_set["shade"], Symbol.(Tuple(log_set["shade"].attrs["structure"]))), - DataCube(log_set["coral_dhw_log"], Symbol.(Tuple(log_set["coral_dhw_log"].attrs["structure"]))), + DataCube( + log_set["coral_dhw_log"], + Symbol.(Tuple(log_set["coral_dhw_log"].attrs["structure"])) + ) ) end function _rankings_data(rankings_set::ZArray{T})::YAXArray{T} where {T} ax_names = Symbol.(Tuple(rankings_set.attrs["structure"])) - ax_labels::Vector{Union{UnitRange{Int64},Vector{Symbol}}} = range.([1], size(rankings_set)) + ax_labels::Vector{Union{UnitRange{Int64},Vector{Symbol}}} = + range.([1], size(rankings_set)) # Replace intervention intervention_idx = findfirst(x -> x == :intervention, ax_names) @@ -99,7 +103,9 @@ end Helper function to copy environmental data layer statistics from data store. """ -function _copy_env_data(src::String, dst::String, folder_name::String, subdir=""::String)::Nothing +function _copy_env_data( + src::String, dst::String, folder_name::String, subdir=""::String +)::Nothing src_dir = joinpath(src, folder_name, subdir) dst_dir = joinpath(dst, folder_name, subdir) mkpath(dst_dir) @@ -126,8 +132,8 @@ function combine_results(result_sets...)::ResultSet # Ensure all sim constants are identical @assert all([ - result_sets[i].sim_constants == result_sets[i+1].sim_constants - for i in 1:(length(result_sets)-1) + result_sets[i].sim_constants == result_sets[i + 1].sim_constants + for i in 1:(length(result_sets) - 1) ]) # Ensure all result sets were from the same version of ADRIA @@ -161,7 +167,7 @@ function combine_results(result_sets...)::ResultSet envlayer.connectivity_fn, dirname(envlayer.DHW_fn), dirname(envlayer.wave_fn), - envlayer.timeframe, + envlayer.timeframe ) all_inputs = reduce(vcat, [getfield(rs, :inputs) for rs in result_sets]) @@ -176,7 +182,7 @@ function combine_results(result_sets...)::ResultSet rs1.site_ids, rs1.site_area, rs1.site_max_coral_cover, - rs1.site_centroids, + rs1.site_centroids ) # Copy site data into result set @@ -184,7 +190,7 @@ function combine_results(result_sets...)::ResultSet cp( attrs[:site_data_file], joinpath(new_loc, SPATIAL_DATA, basename(attrs[:site_data_file])); - force=true, + force=true ) # Store copy of model specification as CSV @@ -199,7 +205,7 @@ function combine_results(result_sets...)::ResultSet fill_as_missing=false, path=input_loc, chunks=(1, input_dims[2]), - attrs=attrs, + attrs=attrs ) # Store post-processed table of input parameters. 
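A minimal standalone sketch of the scenario-offset bookkeeping in the `combine_results` hunks below, with made-up scenario counts (each result set fills the next block of scenario columns):

    let scen_id = 1
        for rs_scen_len in [8, 4]   # scenarios contributed by each result set (toy sizes)
            println(scen_id:(scen_id + rs_scen_len - 1))   # 1:8, then 9:12
            scen_id += rs_scen_len
        end
    end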
@@ -225,9 +231,9 @@ function combine_results(result_sets...)::ResultSet rs_scen_len = size(s_log, :scenarios) try - n_log[:, :, scen_id:(scen_id+(rs_scen_len-1))] .= s_log + n_log[:, :, scen_id:(scen_id + (rs_scen_len - 1))] .= s_log catch - n_log[:, :, :, scen_id:(scen_id+(rs_scen_len-1))] .= s_log + n_log[:, :, :, scen_id:(scen_id + (rs_scen_len - 1))] .= s_log end scen_id = scen_id + rs_scen_len @@ -245,7 +251,7 @@ function combine_results(result_sets...)::ResultSet dim_struct[:unique_site_ids] = rs1.site_ids end - result_dims = (size(rs1.outcomes[m_name])[1:(end-1)]..., n_scenarios) + result_dims = (size(rs1.outcomes[m_name])[1:(end - 1)]..., n_scenarios) m_store = zcreate( Float32, result_dims...; @@ -255,9 +261,9 @@ function combine_results(result_sets...)::ResultSet z_store.folder, RESULTS, string(m_name)), - chunks=(result_dims[1:(end-1)]..., 1), + chunks=(result_dims[1:(end - 1)]..., 1), attrs=dim_struct, - compressor=compressor, + compressor=compressor ) # Copy results over @@ -265,9 +271,9 @@ function combine_results(result_sets...)::ResultSet for rs in result_sets rs_scen_len = size(rs.outcomes[m_name], :scenarios) try - m_store[:, :, scen_id:(scen_id+(rs_scen_len-1))] .= rs.outcomes[m_name] + m_store[:, :, scen_id:(scen_id + (rs_scen_len - 1))] .= rs.outcomes[m_name] catch - m_store[:, :, :, scen_id:(scen_id+(rs_scen_len-1))] .= rs.outcomes[m_name] + m_store[:, :, :, scen_id:(scen_id + (rs_scen_len - 1))] .= rs.outcomes[m_name] end scen_id = scen_id + rs_scen_len @@ -434,11 +440,11 @@ Extract parameters for a specific model component from exported model specificat """ function component_params(rs::ResultSet, component::T)::DataFrame where {T} spec = rs.model_spec - return spec[spec.component.==string(component), :] + return spec[spec.component .== string(component), :] end function component_params(rs::ResultSet, components::Vector{T})::DataFrame where {T} spec = rs.model_spec - return spec[spec.component.∈[replace.(string.(components), "ADRIA." => "")], :] + return spec[spec.component .∈ [replace.(string.(components), "ADRIA." => "")], :] end """ diff --git a/src/io/initial_coral_cover.jl b/src/io/initial_coral_cover.jl index ebdd6a041..876d8f70e 100644 --- a/src/io/initial_coral_cover.jl +++ b/src/io/initial_coral_cover.jl @@ -10,7 +10,9 @@ Load initial coral cover data from netCDF. """ function load_initial_cover(data_fn::String)::YAXArray _dim_names_replace = [:covers => :species, :reef_siteid => :sites] - return _split_cover(load_nc_data(data_fn, "covers"; dim_names_replace=_dim_names_replace)) + return _split_cover( + load_nc_data(data_fn, "covers"; dim_names_replace=_dim_names_replace) + ) end function load_initial_cover(n_group_and_size::Int64, n_sites::Int64)::YAXArray @warn "Using random initial coral cover" @@ -34,8 +36,9 @@ function _split_cover(cover::YAXArray)::YAXArray # Build initial cover final data n_groups::Int64, n_sizes::Int64 = size(init_cover_weights) n_groups_sizes::Int64 = n_groups * n_sizes - cover_data::Matrix{Float64} = reshape(init_cover_weights', n_groups_sizes) .* - repeat(cover[selected_groups, :]; inner=(n_sizes, 1)) + cover_data::Matrix{Float64} = + reshape(init_cover_weights', n_groups_sizes) .* + repeat(cover[selected_groups, :]; inner=(n_sizes, 1)) return DataCube(cover_data; _cover_labels(cover, n_sizes, selected_groups)...) 
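A standalone sketch of the weight expansion in `_split_cover` above, assuming toy sizes (2 functional groups × 3 size classes, 2 locations): group-level covers are repeated per size class, then scaled by each group's size-class weights.

    n_groups, n_sizes = 2, 3
    init_cover_weights = [0.2 0.3 0.5; 0.1 0.4 0.5]   # per-group weights, rows sum to 1
    group_cover = [0.30 0.10; 0.20 0.05]              # group × location cover
    weights_flat = reshape(init_cover_weights', n_groups * n_sizes)
    cover_data = weights_flat .* repeat(group_cover; inner=(n_sizes, 1))
    # cover_data is (group ⋅ size) × location; each group's rows sum back to its cover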
end @@ -79,8 +82,10 @@ function _init_cover_weights()::Matrix{Float64} bin_edges_area::Matrix{Float64} = colony_mean_area(_bin_edges) cdf_integral::Matrix{Float64} = cdf.(reef_mod_area_dist, bin_edges_area) - init_cover_fracs::Matrix{Float64} = (cdf_integral[:, 2:end] .- cdf_integral[:, 1:(end-1)]) - init_cover_fracs = init_cover_fracs ./ sum(init_cover_fracs, dims=2) + init_cover_fracs::Matrix{Float64} = ( + cdf_integral[:, 2:end] .- cdf_integral[:, 1:(end - 1)] + ) + init_cover_fracs = init_cover_fracs ./ sum(init_cover_fracs; dims=2) return replace!(init_cover_fracs, NaN => 0.0) end diff --git a/src/io/inputs.jl b/src/io/inputs.jl index 9acf7863a..562405560 100644 --- a/src/io/inputs.jl +++ b/src/io/inputs.jl @@ -68,7 +68,7 @@ function load_nc_data( data_fn::String, attr::String; dim_names::Vector{Symbol}=Symbol[], - dim_names_replace::Vector{Pair{Symbol,Symbol}}=Pair{Symbol,Symbol}[], + dim_names_replace::Vector{Pair{Symbol,Symbol}}=Pair{Symbol,Symbol}[] )::YAXArray local data try @@ -83,7 +83,7 @@ function fallback_nc_data( data_fn::String, attr::String; dim_names::Vector{Symbol}=Symbol[], - dim_names_replace::Vector{Pair{Symbol,Symbol}}=Pair{Symbol,Symbol}[], + dim_names_replace::Vector{Pair{Symbol,Symbol}}=Pair{Symbol,Symbol}[] )::YAXArray NetCDF.open(data_fn; mode=NC_NOWRITE) do nc_file data::Array{<:AbstractFloat} = NetCDF.readvar(nc_file, attr) @@ -121,7 +121,7 @@ function _nc_dim_labels( catch err error( "Error loading $data_fn : could not determine number of locations." * - "Detected size: $(size(data)) | Known number of locations: $(length(sites))", + "Detected size: $(size(data)) | Known number of locations: $(length(sites))" ) end diff --git a/src/io/result_io.jl b/src/io/result_io.jl index 5fb137d72..3e9317ec8 100644 --- a/src/io/result_io.jl +++ b/src/io/result_io.jl @@ -42,7 +42,7 @@ function store_env_summary( type::String, file_loc::String, rcp::String, - compressor::Zarr.Compressor, + compressor::Zarr.Compressor )::ZArray stats = summarize_env_data(data_cube) @@ -118,7 +118,7 @@ function scenario_attributes( unique_sites, area, k, - centroids, + centroids )::Dict attrs::Dict{Symbol,Any} = Dict( :name => name, @@ -138,7 +138,7 @@ function scenario_attributes( :site_ids => unique_sites, :site_area => area, :site_max_coral_cover => k, - :site_centroids => centroids, + :site_centroids => centroids ) return attrs @@ -179,7 +179,7 @@ function setup_logs(z_store, unique_sites, n_scens, tf, n_sites, n_group_and_siz attrs = Dict( # Here, "intervention" refers to seeding or shading :structure => ("timesteps", "sites", "intervention", "scenarios"), - :unique_site_ids => unique_sites, + :unique_site_ids => unique_sites ) ranks = zcreate( Float32, @@ -189,12 +189,12 @@ function setup_logs(z_store, unique_sites, n_scens, tf, n_sites, n_group_and_siz fill_as_missing=false, path=log_fn, chunks=(rank_dims[1:3]..., 1), - attrs=attrs, + attrs=attrs ) attrs = Dict( :structure => ("timesteps", "coral_id", "sites", "scenarios"), - :unique_site_ids => unique_sites, + :unique_site_ids => unique_sites ) seed_log = zcreate( Float32, @@ -204,7 +204,7 @@ function setup_logs(z_store, unique_sites, n_scens, tf, n_sites, n_group_and_siz fill_as_missing=false, path=log_fn, chunks=(seed_dims[1:3]..., 1), - attrs=attrs, + attrs=attrs ) attrs = Dict( @@ -219,7 +219,7 @@ function setup_logs(z_store, unique_sites, n_scens, tf, n_sites, n_group_and_siz fill_as_missing=false, path=log_fn, chunks=(fog_dims[1:2]..., 1), - attrs=attrs, + attrs=attrs ) shade_log = zcreate( Float32, @@ -229,7 +229,7 @@ function 
setup_logs(z_store, unique_sites, n_scens, tf, n_sites, n_group_and_siz fill_as_missing=false, path=log_fn, chunks=(fog_dims[1:2]..., 1), - attrs=attrs, + attrs=attrs ) # TODO: Could log bleaching mortality @@ -242,7 +242,7 @@ function setup_logs(z_store, unique_sites, n_scens, tf, n_sites, n_group_and_siz # Log for coral DHW thresholds attrs = Dict( :structure => ("timesteps", "species", "sites", "scenarios"), - :unique_site_ids => unique_sites, + :unique_site_ids => unique_sites ) local coral_dhw_log @@ -258,7 +258,7 @@ function setup_logs(z_store, unique_sites, n_scens, tf, n_sites, n_group_and_siz fill_as_missing=false, path=log_fn, chunks=(tf, n_group_and_size, n_sites, 1), - attrs=attrs, + attrs=attrs ) else coral_dhw_log = zcreate( @@ -272,7 +272,7 @@ function setup_logs(z_store, unique_sites, n_scens, tf, n_sites, n_group_and_siz fill_as_missing=false, path=log_fn, chunks=(tf, n_group_and_size, 1, 1), - attrs=attrs, + attrs=attrs ) end @@ -363,7 +363,7 @@ function setup_result_store!(domain::Domain, scen_spec::DataFrame)::Tuple fill_as_missing=false, path=input_loc, chunks=input_dims, - attrs=attrs, + attrs=attrs ) # Store table of factor values @@ -390,12 +390,12 @@ function setup_result_store!(domain::Domain, scen_spec::DataFrame)::Tuple end met_names = [:relative_cover, :relative_shelter_volume, - :absolute_shelter_volume, :relative_juveniles, :juvenile_indicator, :coral_evenness, + :absolute_shelter_volume, :relative_juveniles, :juvenile_indicator, :coral_evenness ] dim_struct = Dict( :structure => string.((:timesteps, :sites, :scenarios)), - :unique_site_ids => unique_sites(domain), + :unique_site_ids => unique_sites(domain) ) result_dims::Tuple{Int64,Int64,Int64} = dim_lengths(dim_struct[:structure]) @@ -404,7 +404,7 @@ function setup_result_store!(domain::Domain, scen_spec::DataFrame)::Tuple zcreate(Float32, result_dims...; fill_value=nothing, fill_as_missing=false, path=joinpath(z_store.folder, RESULTS, string(m_name)), - chunks=(result_dims[1:(end-1)]..., 1), + chunks=(result_dims[1:(end - 1)]..., 1), attrs=dim_struct, compressor=COMPRESSOR) for m_name in met_names @@ -439,8 +439,8 @@ function setup_result_store!(domain::Domain, scen_spec::DataFrame)::Tuple "dhw_scenario", joinpath(z_store.folder, ENV_STATS, "dhw"), rcp, - COMPRESSOR, - ), + COMPRESSOR + ) ) push!( wave_stats, @@ -449,8 +449,8 @@ function setup_result_store!(domain::Domain, scen_spec::DataFrame)::Tuple "wave_scenario", joinpath(z_store.folder, ENV_STATS, "wave"), rcp, - COMPRESSOR, - ), + COMPRESSOR + ) ) push!( connectivity, @@ -459,7 +459,7 @@ function setup_result_store!(domain::Domain, scen_spec::DataFrame)::Tuple joinpath(z_store.folder, "connectivity"), rcp, COMPRESSOR - ), + ) ) push!(dhw_stat_names, Symbol("dhw_stat_$rcp")) @@ -475,7 +475,9 @@ function setup_result_store!(domain::Domain, scen_spec::DataFrame)::Tuple dhw_stats..., wave_stats..., connectivity..., - setup_logs(z_store, unique_sites(domain), nrow(scen_spec), tf, n_sites, n_group_and_size)..., + setup_logs( + z_store, unique_sites(domain), nrow(scen_spec), tf, n_sites, n_group_and_size + )... ] return domain, @@ -489,9 +491,9 @@ function setup_result_store!(domain::Domain, scen_spec::DataFrame)::Tuple :seed_log, :fog_log, :shade_log, - :coral_dhw_log, + :coral_dhw_log ), - stores, + stores )... 
) end @@ -580,11 +582,13 @@ function load_results(result_loc::String)::ResultSet result_loc = replace(result_loc, "\\" => "/") if endswith(result_loc, "/") - result_loc = result_loc[1:(end-1)] + result_loc = result_loc[1:(end - 1)] end # Spatial data - site_data = GDF.read(joinpath(result_loc, SPATIAL_DATA, input_set.attrs["name"] * ".gpkg")) + site_data = GDF.read( + joinpath(result_loc, SPATIAL_DATA, input_set.attrs["name"] * ".gpkg") + ) sort!(site_data, [Symbol(input_set.attrs["site_id_col"])]) # Model specification @@ -625,7 +629,7 @@ function load_results(result_loc::String)::ResultSet input_set.attrs["connectivity_file"], input_set.attrs["DHW_file"], input_set.attrs["wave_file"], - input_set.attrs["timeframe"], + input_set.attrs["timeframe"] ) outcomes = Dict{Symbol,YAXArray}() @@ -661,7 +665,7 @@ function load_results(result_loc::String)::ResultSet """ outcomes[Symbol(basename(sd))] = DataCube( res; - zip(Symbol.(res.attrs["structure"]), [1:s for s in size(res)])..., + zip(Symbol.(res.attrs["structure"]), [1:s for s in size(res)])... ) else rethrow(err) @@ -680,7 +684,7 @@ function load_results(result_loc::String)::ResultSet wave_stat_set, conn_set, site_data, - model_spec, + model_spec ) end function load_results(domain::Domain)::ResultSet @@ -706,6 +710,6 @@ Generate path to the data store of results for the given Domain and RCPs names. function _result_location(d::Domain, rcps::Vector{String})::String return joinpath( ENV["ADRIA_OUTPUT_DIR"], - "$(d.name)__RCPs_$(join(rcps, "_"))__$(d.scenario_invoke_time)", + "$(d.name)__RCPs_$(join(rcps, "_"))__$(d.scenario_invoke_time)" ) end diff --git a/src/io/rme_result_io.jl b/src/io/rme_result_io.jl index abcb0dfe9..9e6849f55 100644 --- a/src/io/rme_result_io.jl +++ b/src/io/rme_result_io.jl @@ -5,7 +5,7 @@ using DataFrames, NetCDF, YAXArrays -struct RMEResultSet{T1, T2, D, G, D1} <: ResultSet +struct RMEResultSet{T1,T2,D,G,D1} <: ResultSet name::String RCP::String @@ -22,7 +22,7 @@ struct RMEResultSet{T1, T2, D, G, D1} <: ResultSet sim_constants::D1 model_spec::DataFrame - outcomes::Dict{Symbol, YAXArray} + outcomes::Dict{Symbol,YAXArray} end """ @@ -71,7 +71,7 @@ function load_results( id_list_fn, DataFrame; header=false, - comment="#", + comment="#" ) _manual_id_corrections!(geodata, id_list) @@ -99,8 +99,9 @@ function load_results( inputs::DataFrame = _get_inputs(input_path) # Counterfactual scenario if outplant area and enrichment area are both 0 - scenario_groups::Dict{Symbol, BitVector} = - _construct_scenario_groups(inputs, length(raw_set.scenarios)) + scenario_groups::Dict{Symbol,BitVector} = _construct_scenario_groups( + inputs, length(raw_set.scenarios) + ) env_layer_md::EnvLayer = EnvLayer( data_dir, @@ -114,7 +115,7 @@ function load_results( timeframe ) - outcomes::Dict{Symbol, YAXArray} = Dict(); + outcomes::Dict{Symbol,YAXArray} = Dict() for (key, cube) in raw_set.cubes outcomes[key] = _reformat_cube(RMEResultSet, cube) end @@ -157,7 +158,7 @@ function _get_inputs(filepath::String)::DataFrame @warn "Unable to find scenario spec at $(filepath). Skipping." return DataFrame() end - return inputs = CSV.read(filepath, DataFrame, header=true) + return inputs = CSV.read(filepath, DataFrame; header=true) end """ @@ -169,23 +170,25 @@ groupings for compatibility with ADRIA scenario plotting. 
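A standalone sketch of the grouping rule `_construct_scenario_groups` implements below, with assumed toy inputs: a run counts as counterfactual only when both its outplant and enrichment areas are zero.

    outplant_area_pct = [0.0, 5.0, 0.0]
    enrichment_area_pct = [0.0, 0.0, 2.5]
    counterfactual = BitVector([
        p_a == 0.0 && e_a == 0.0
        for (p_a, e_a) in zip(outplant_area_pct, enrichment_area_pct)
    ])
    # counterfactual == [1, 0, 0]; any other combination counts as intervened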
function _construct_scenario_groups( inputs::DataFrame, n_scenarios::Int -)::Dict{Symbol, BitVector} +)::Dict{Symbol,BitVector} if size(inputs) == (0, 0) return Dict(:counterfactual => BitVector([true for _ in 1:n_scenarios])) end counterfactual_scens::BitVector = BitVector([ p_a == 0.0 && e_a == 0 for (p_a, e_a) - in zip(inputs.outplant_area_pct, inputs.enrichment_area_pct) + in + zip(inputs.outplant_area_pct, inputs.enrichment_area_pct) ]) # Intervened if not counterfactual intervened_scens::BitVector = BitVector([ p_a != 0 || e_a != 0 for (p_a, e_a) - in zip(inputs.outplant_area_pct, inputs.enrichment_area_pct) + in + zip(inputs.outplant_area_pct, inputs.enrichment_area_pct) ]) - scenario_groups::Dict{Symbol, BitVector} = Dict() + scenario_groups::Dict{Symbol,BitVector} = Dict() # If the runs do not contain counterfactual or intervened runs exclude the key if any(counterfactual_scens) @@ -252,7 +255,7 @@ function _create_model_spec(scenario_spec::DataFrame)::DataFrame "Enrichment Location Count" ] - default_df = DataFrame( + default_df = DataFrame(; component="Intervention", fieldname=fieldname, description=descriptions, @@ -261,7 +264,9 @@ function _create_model_spec(scenario_spec::DataFrame)::DataFrame # Field names are the same as column names fieldnames::Vector{Symbol} = Symbol.(names(scenario_spec)) - inds::Vector{Int} = [findfirst(x -> x == fname, default_df.fieldname) for fname in fieldnames] + inds::Vector{Int} = [ + findfirst(x -> x == fname, default_df.fieldname) for fname in fieldnames + ] return default_df[inds, :] end @@ -272,14 +277,14 @@ function Base.show(io::IO, mime::MIME"text/plain", rs::RMEResultSet) locations = length(rs.outcomes[:total_cover].sites) tf = rs.env_layer_md.timeframe - println(""" - Name: $(rs.name) + return println(""" + Name: $(rs.name) - Results stored at: $(rs.env_layer_md.dpkg_path) + Results stored at: $(rs.env_layer_md.dpkg_path) - RCP(s) represented: $(rcps) - Scenarios run: $(scens) - Number of locations: $(locations) - Timesteps: $(tf) - """) + RCP(s) represented: $(rcps) + Scenarios run: $(scens) + Number of locations: $(locations) + Timesteps: $(tf) + """) end diff --git a/src/io/sampling.jl b/src/io/sampling.jl index 8963ffcbd..cb7df5a09 100644 --- a/src/io/sampling.jl +++ b/src/io/sampling.jl @@ -8,7 +8,9 @@ import Distributions: sample import Surrogates.QuasiMonteCarlo as QMC import Surrogates.QuasiMonteCarlo: SobolSample, OwenScramble -const DISCRETE_FACTOR_TYPES = ["ordered categorical", "unordered categorical", "ordered discrete"] +const DISCRETE_FACTOR_TYPES = [ + "ordered categorical", "unordered categorical", "ordered discrete" +] """ adjust_samples(d::Domain, df::DataFrame)::DataFrame @@ -27,19 +29,20 @@ function adjust_samples(spec::DataFrame, df::DataFrame)::DataFrame depth_offsets = component_params(spec, DepthThresholds) # If counterfactual, set all intervention options to 0.0 - df[df.guided.==-1.0, filter(x -> x ∉ [:guided, :heritability], interv.fieldname)] .= 0.0 + df[df.guided .== -1.0, filter(x -> x ∉ [:guided, :heritability], interv.fieldname)] .= + 0.0 # If unguided/counterfactual, set all preference criteria, except those related to depth, to 0. 
non_depth_names = vcat( seed_weights.fieldname, fog_weights.fieldname ) - df[df.guided.==0.0, non_depth_names] .= 0.0 # Turn off weights for unguided - df[df.guided.==-1.0, non_depth_names] .= 0.0 # Turn off weights for cf - df[df.guided.==-1.0, depth_offsets.fieldname] .= 0.0 # No depth offsets for cf + df[df.guided .== 0.0, non_depth_names] .= 0.0 # Turn off weights for unguided + df[df.guided .== -1.0, non_depth_names] .= 0.0 # Turn off weights for cf + df[df.guided .== -1.0, depth_offsets.fieldname] .= 0.0 # No depth offsets for cf # If unguided, set planning horizon to 0. - df[df.guided.==0.0, :plan_horizon] .= 0.0 + df[df.guided .== 0.0, :plan_horizon] .= 0.0 # If no seeding is to occur, set related variables to 0 not_seeded = (df.N_seed_TA .== 0) .& (df.N_seed_CA .== 0) .& (df.N_seed_SM .== 0) @@ -88,7 +91,9 @@ Notes: # Returns Scenario specification """ -function sample(dom::Domain, n::Int, sample_method=SobolSample(R=OwenScramble(base=2, pad=32)))::DataFrame +function sample( + dom::Domain, n::Int, sample_method=SobolSample(; R=OwenScramble(; base=2, pad=32)) +)::DataFrame n > 0 ? n : throw(DomainError(n, "`n` must be > 0")) return sample(model_spec(dom), n, sample_method) end @@ -110,7 +115,12 @@ Notes: # Returns Scenario specification """ -function sample(dom::Domain, n::Int, component::Type, sample_method=SobolSample(R=OwenScramble(base=2, pad=32)))::DataFrame +function sample( + dom::Domain, + n::Int, + component::Type, + sample_method=SobolSample(; R=OwenScramble(; base=2, pad=32)) +)::DataFrame n > 0 ? n : throw(DomainError(n, "`n` must be > 0")) spec = component_params(dom.model, component) @@ -133,14 +143,14 @@ Scenario specification function sample( spec::DataFrame, n::Int64, - sample_method=SobolSample(R=OwenScramble(base=2, pad=32)) + sample_method=SobolSample(; R=OwenScramble(; base=2, pad=32)) )::DataFrame if contains(string(sample_method), "SobolSample") && !ispow2(n) throw(DomainError(n, "`n` must be a power of 2 when using the Sobol' sampler")) end # Select non-constant params - vary_vars = spec[spec.is_constant.==false, [:fieldname, :dist, :dist_params]] + vary_vars = spec[spec.is_constant .== false, [:fieldname, :dist, :dist_params]] n_vary_params = size(vary_vars, 1) if n_vary_params == 0 throw(DomainError(n_vary_params, "Number of parameters to perturb must be > 0")) @@ -163,7 +173,7 @@ function sample( # Combine varying and constant values (constant params use their indicated default vals) full_df = hcat(fill.(spec.val, n)...) - full_df[:, spec.is_constant.==false] .= samples + full_df[:, spec.is_constant .== false] .= samples # Ensure unguided scenarios do not have superfluous factor combinations return adjust_samples(spec, DataFrame(full_df, spec.fieldname)) @@ -183,9 +193,18 @@ Coral factors are set to their default values and are not perturbed or sampled. # Returns Scenario specification """ -function sample_selection(d::Domain, n::Int64, sample_method=SobolSample(R=OwenScramble(base=2, pad=32)))::DataFrame +function sample_selection( + d::Domain, n::Int64, sample_method=SobolSample(; R=OwenScramble(; base=2, pad=32)) +)::DataFrame subset_spec = component_params( - d.model, [EnvironmentalLayer, Intervention, SeedCriteriaWeights, FogCriteriaWeights, DepthThresholds] + d.model, + [ + EnvironmentalLayer, + Intervention, + SeedCriteriaWeights, + FogCriteriaWeights, + DepthThresholds + ] ) # Only sample guided intervention scenarios @@ -213,7 +232,9 @@ Generate only counterfactual scenarios. 
# Returns Scenario specification """ -function sample_cf(d::Domain, n::Int64, sample_method=SobolSample(R=OwenScramble(base=2, pad=32)))::DataFrame +function sample_cf( + d::Domain, n::Int64, sample_method=SobolSample(; R=OwenScramble(; base=2, pad=32)) +)::DataFrame spec_df = model_spec(d) # Unguided scenarios only @@ -256,12 +277,14 @@ Generate only guided scenarios. # Returns Scenario specification """ -function sample_guided(d::Domain, n::Int64, sample_method=SobolSample(R=OwenScramble(base=2, pad=32)))::DataFrame +function sample_guided( + d::Domain, n::Int64, sample_method=SobolSample(; R=OwenScramble(; base=2, pad=32)) +)::DataFrame spec_df = model_spec(d) # Remove unguided scenarios as an option # Sample without unguided (i.e., values >= 1), then revert back to original model spec - if !(spec_df[spec_df.fieldname.==:guided, :is_constant][1]) + if !(spec_df[spec_df.fieldname .== :guided, :is_constant][1]) _adjust_guided_lower_bound!(spec_df, 1) spec_df[!, :is_constant] .= spec_df[!, :lower_bound] .== spec_df[!, :upper_bound] end @@ -282,7 +305,9 @@ Generate only unguided scenarios. # Returns Scenario specification """ -function sample_unguided(d::Domain, n::Int64, sample_method=SobolSample(R=OwenScramble(base=2, pad=32)))::DataFrame +function sample_unguided( + d::Domain, n::Int64, sample_method=SobolSample(; R=OwenScramble(; base=2, pad=32)) +)::DataFrame spec_df = model_spec(d) # Fix guided factor to 0 (i.e., unguided scenarios only) @@ -309,7 +334,8 @@ function _deactivate_interventions(to_update::DataFrame)::Nothing cols = Symbol[fn for fn in intervs.fieldname if fn != :guided] for c in cols _row = to_update.fieldname .== c - _dparams = length(to_update[_row, :dist_params][1]) == 2 ? (0.0, 0.0) : (0.0, 0.0, 0.0) + _dparams = + length(to_update[_row, :dist_params][1]) == 2 ? (0.0, 0.0) : (0.0, 0.0, 0.0) dval = _is_discrete_factor(to_update[_row, :ptype][1]) ? 
0 : 0.0 to_update[_row, [:val, :lower_bound, :upper_bound, :dist_params, :is_constant]] .= @@ -344,22 +370,22 @@ fix_factor!(dom; guided=3, N_seed_TA=1e6) """ function fix_factor!(d::Domain, factor::Symbol)::Nothing params = DataFrame(d.model) - default_val = params[params.fieldname.==factor, :val][1] + default_val = params[params.fieldname .== factor, :val][1] - dist_params = params[params.fieldname.==factor, :dist_params][1] + dist_params = params[params.fieldname .== factor, :dist_params][1] new_params = Tuple(fill(default_val, length(dist_params))) - params[params.fieldname.==factor, :dist_params] .= [new_params] + params[params.fieldname .== factor, :dist_params] .= [new_params] update!(d, params) return nothing end function fix_factor!(d::Domain, factor::Symbol, val::Real)::Nothing params = DataFrame(d.model) - params[params.fieldname.==factor, :val] .= val + params[params.fieldname .== factor, :val] .= val - dist_params = params[params.fieldname.==factor, :dist_params][1] + dist_params = params[params.fieldname .== factor, :dist_params][1] new_dist_params = Tuple(fill(val, length(dist_params))) - params[params.fieldname.==factor, :dist_params] .= [new_dist_params] + params[params.fieldname .== factor, :dist_params] .= [new_dist_params] update!(d, params) return nothing @@ -374,7 +400,9 @@ function fix_factor!(d::Domain; factors...)::Nothing params[target_order, :val] .= factor_vals dist_params = params[target_order, :dist_params] - new_dist_params = [Tuple(fill(v, length(d))) for (v, d) in zip(factor_vals, dist_params)] + new_dist_params = [ + Tuple(fill(v, length(d))) for (v, d) in zip(factor_vals, dist_params) + ] params[target_order, :dist_params] .= new_dist_params update!(d, params) @@ -437,7 +465,7 @@ end """ function get_attr(dom::Domain, factor::Symbol, attr::Symbol) ms = model_spec(dom) - return ms[ms.fieldname.==factor, attr][1] + return ms[ms.fieldname .== factor, attr][1] end """ @@ -469,9 +497,9 @@ function set_factor_bounds(dom::Domain, factor::Symbol, new_dist_params::Tuple): new_val = mean(new_dist_params[1:2]) ms = model_spec(dom) - ms[ms.fieldname.==factor, :dist_params] .= [new_dist_params] + ms[ms.fieldname .== factor, :dist_params] .= [new_dist_params] (old_val isa Int) && (new_val = round(new_val)) - ms[ms.fieldname.==factor, :val] .= oftype(old_val, new_val) + ms[ms.fieldname .== factor, :val] .= oftype(old_val, new_val) ms[!, :is_constant] .= (ms[!, :lower_bound] .== ms[!, :upper_bound]) update!(dom, ms) diff --git a/src/main_app.jl b/src/main_app.jl index 4156a82ca..89077e8f1 100644 --- a/src/main_app.jl +++ b/src/main_app.jl @@ -1,15 +1,12 @@ using Plots, Plots.Measures - const valid_methods = ["run", "load", "help"] - """ Main entry point for ADRIA application. """ function julia_main()::Cint - method = ARGS[1] if method == "run" adria_cmd_run() @@ -37,7 +34,6 @@ function julia_main()::Cint return 0 # if things finished successfully end - function adria_cmd_run() config = TOML.parsefile("config.toml") ENV["ADRIA_OUTPUT_DIR"] = config["results"]["output_dir"] @@ -45,7 +41,7 @@ function adria_cmd_run() data_pkg_loc = ARGS[2] rcp = ARGS[3] scenario_file = ARGS[4] - scenarios = CSV.read(scenario_file, DataFrame, comment="#") + scenarios = CSV.read(scenario_file, DataFrame; comment="#") # If number of scenarios <= 4, not worth multiprocessing... 
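A minimal sketch of the scenario-table read above (column names here are illustrative stand-ins for model-spec fieldnames such as `guided` and `N_seed_TA`):

    using CSV, DataFrames
    io = IOBuffer("guided,N_seed_TA\n1,500000\n# comment rows are skipped\n0,0\n")
    scenarios = CSV.read(io, DataFrame; comment="#")
    nrow(scenarios)   # 2; with 4 or fewer scenarios the run stays single-process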
if nrow(scenarios) > 4 @@ -69,19 +65,17 @@ function adria_cmd_run() println("Results stored in: $(ADRIA.result_location(res))") - _indicative_result_display(res) + return _indicative_result_display(res) end - function adria_cmd_load() res_loc = ARGS[2] println("Loading results stored in: $(res_loc)") res = ADRIA.load_results(res_loc) - _indicative_result_display(res) + return _indicative_result_display(res) end - """ Display results for indicative purposes, just to demonstrate things are working. Not intended for production. @@ -101,39 +95,38 @@ function _indicative_result_display(res) upper = Y_no[:upper_95] lower = Y_no[:lower_95] - p = plot(upper, fillrange=lower, color=:lightsalmon1, alpha=0.8, label="") - plot!(Y_no[:median], label="No Deployment median", linecolor=:red, alpha=0.8) - + p = plot(upper; fillrange=lower, color=:lightsalmon1, alpha=0.8, label="") + plot!(Y_no[:median]; label="No Deployment median", linecolor=:red, alpha=0.8) # Unguided Deployment upper = Y_ung[:upper_95] lower = Y_ung[:lower_95] - p = plot!(upper, fillrange=lower, color=:lightblue2, alpha=0.8, label="") - plot!(Y_ung[:median], label="Unguided median", linecolor=:blue, alpha=0.5) - + p = plot!(upper; fillrange=lower, color=:lightblue2, alpha=0.8, label="") + plot!(Y_ung[:median]; label="Unguided median", linecolor=:blue, alpha=0.5) # Guided upper = Y_g[:upper_95] lower = Y_g[:lower_95] - plot!(upper, fillrange=lower, color=:lightseagreen, alpha=0.4, label="") - plot!(Y_g[:median], + plot!(upper; fillrange=lower, color=:lightseagreen, alpha=0.4, label="") + plot!(Y_g[:median]; label="Guided median", linecolor=:green, alpha=0.4, xlabel="Year", ylabel="Relative Cover", xticks=(1:75, year_axis)) - p2 = plot(Y_ung[:mean] - Y_no[:mean], label="Guided - No Deployment (μ)", + p2 = plot(Y_ung[:mean] - Y_no[:mean]; label="Guided - No Deployment (μ)", xlabel="Year", ylabel="δ Relative Cover", xticks=(1:75, year_axis), color=:red ) - plot!(Y_g[:mean] - Y_ung[:mean], label="Guided - Unguided (μ)", color=:blue) + plot!(Y_g[:mean] - Y_ung[:mean]; label="Guided - Unguided (μ)", color=:blue) - fig = plot(p, p2, size=(1000, 500), layout=(1, 2), left_margin=5mm, bottom_margin=5mm, xrotation=45, + fig = plot(p, p2; size=(1000, 500), layout=(1, 2), left_margin=5mm, bottom_margin=5mm, + xrotation=45, legend=:best, fg_legend=:transparent, bg_legend=:transparent) # display(fig) # gui(fig) - savefig(joinpath(ENV["ADRIA_OUTPUT_DIR"], "$(ADRIA.store_name(res)).png")) + return savefig(joinpath(ENV["ADRIA_OUTPUT_DIR"], "$(ADRIA.store_name(res)).png")) # TODO: Force display from commandline # https://discourse.julialang.org/t/how-to-display-the-plots-by-executing-the-file-from-command-line/13822/2 diff --git a/src/metrics/metrics.jl b/src/metrics/metrics.jl index ba1ad2ba7..5fa6cd3e5 100644 --- a/src/metrics/metrics.jl +++ b/src/metrics/metrics.jl @@ -13,12 +13,11 @@ using ADRIA: DataCube, ZeroDataCube, axes_names, axis_labels, axis_index using FLoops using DataFrames -using ADRIA: coral_spec, colony_mean_area, ResultSet, timesteps, site_k_area, site_area, planar_area_params - +using ADRIA: coral_spec, colony_mean_area, ResultSet, timesteps, site_k_area, site_area, + planar_area_params abstract type Outcome end - struct Metric{F<:Function,T<:Tuple,S<:String} <: Outcome func::F dims::T # output dimension axes ? @@ -26,7 +25,6 @@ struct Metric{F<:Function,T<:Tuple,S<:String} <: Outcome end Metric(f, d) = Metric(f, d, "") - """ (f::Metric)(raw, args...; kwargs...) (f::Metric)(rs::ResultSet, args...; kwargs...) 
@@ -41,7 +39,6 @@ function (f::Metric)(rs::ResultSet, args...; kwargs...)::YAXArray return f.func(rs, args...; kwargs...) end - """ relative_cover(X::AbstractArray{<:Real})::AbstractArray{<:Real} relative_cover(rs::ResultSet)::AbstractArray{<:Real} @@ -56,14 +53,13 @@ Coral cover [0 - 1], relative to available \$k\$ area for a given location. """ function _relative_cover(X::AbstractArray{<:Real})::AbstractArray{<:Real} # Sum over all species and size classes - return dropdims(sum(X, dims=2), dims=2) + return dropdims(sum(X; dims=2); dims=2) end function _relative_cover(rs::ResultSet)::AbstractArray{<:Real} return rs.outcomes[:relative_cover] end relative_cover = Metric(_relative_cover, (:timesteps, :sites, :scenarios)) - """ total_absolute_cover(X::AbstractArray{<:Real}, k_area::Vector{<:Real})::AbstractArray{<:Real} total_absolute_cover(rs::ResultSet)::AbstractArray{<:Real} @@ -89,8 +85,6 @@ function _total_absolute_cover(rs::ResultSet)::AbstractArray{<:Real} end total_absolute_cover = Metric(_total_absolute_cover, (:timesteps, :sites, :scenarios), "m²") - - """ relative_taxa_cover(X::AbstractArray{T}, k_area::Vector{T}, n_groups::Int64) where {T<:Real} relative_taxa_cover(rs::ResultSet) @@ -112,19 +106,23 @@ function _relative_taxa_cover( )::AbstractArray{<:Real} n_steps, n_group_and_size, n_locs = size(X) if n_group_and_size % n_groups != 0 - throw(ArgumentError("Number of functional groups given does not divide n_group_and_size. n_group_and_size: $(n_group_and_size). n_groups: $(n_groups)")) + throw( + ArgumentError( + "Number of functional groups given does not divide n_group_and_size. n_group_and_size: $(n_group_and_size). n_groups: $(n_groups)" + ) + ) end n_sc::Int64 = Int64(n_group_and_size / n_groups) taxa_cover::YAXArray = ZeroDataCube((:timesteps, :taxa), (n_steps, n_groups)) k_cover = zeros(n_steps, n_sc, n_locs) - for (taxa_id, grp) in enumerate([i:i+(n_sc-1) for i in 1:n_sc:n_group_and_size]) + for (taxa_id, grp) in enumerate([i:(i + (n_sc - 1)) for i in 1:n_sc:n_group_and_size]) for (loc, a) in enumerate(k_area) k_cover[:, :, loc] .= X[:, grp, loc] .* a end # Sum over size class groups - taxa_cover[:, taxa_id] = vec(sum(k_cover, dims=(2, 3))) ./ sum(k_area) + taxa_cover[:, taxa_id] = vec(sum(k_cover; dims=(2, 3))) ./ sum(k_area) end return taxa_cover @@ -137,21 +135,29 @@ relative_taxa_cover = Metric(_relative_taxa_cover, (:timesteps, :taxa, :scenario """ relative_loc_taxa_cover(X::AbstractArray{T}, k_area::Vector{T}, n_groups::Int64)::AbstractArray where {T<:Real} """ -function _relative_loc_taxa_cover(X::AbstractArray{T}, k_area::Vector{T}, n_groups::Int64)::AbstractArray where {T<:Real} +function _relative_loc_taxa_cover( + X::AbstractArray{T}, k_area::Vector{T}, n_groups::Int64 +)::AbstractArray where {T<:Real} n_steps, n_group_and_size, n_locs = size(X) if n_group_and_size % n_groups != 0 - throw(ArgumentError("Number of groups must divide n_group_and_size. n_group_and_size $(n_group_and_size), n_groups: $(n_groups)")) + throw( + ArgumentError( + "Number of groups must divide n_group_and_size. 
n_group_and_size $(n_group_and_size), n_groups: $(n_groups)" + ) + ) end n_sc::Int64 = Int64(n_group_and_size / n_groups) - taxa_cover::YAXArray = ZeroDataCube((:timesteps, :taxa, :sites), (n_steps, n_groups, n_locs)) + taxa_cover::YAXArray = ZeroDataCube( + (:timesteps, :taxa, :sites), (n_steps, n_groups, n_locs) + ) k_cover = zeros(n_steps, n_sc) - for (taxa_id, grp) in enumerate([i:i+(n_sc-1) for i in 1:n_sc:n_group_and_size]) + for (taxa_id, grp) in enumerate([i:(i + (n_sc - 1)) for i in 1:n_sc:n_group_and_size]) for (loc, a) in enumerate(k_area) k_cover .= X[:, grp, loc] .* a # Sum over size class groups - taxa_cover[:, taxa_id, loc] = vec(sum(k_cover, dims=2)) ./ a + taxa_cover[:, taxa_id, loc] = vec(sum(k_cover; dims=2)) ./ a end end @@ -161,7 +167,9 @@ end # return rs.outcomes[:relative_loc_taxa_cover] # end -relative_loc_taxa_cover = Metric(_relative_loc_taxa_cover, (:timesteps, :taxa, :sites, :scenarios)) +relative_loc_taxa_cover = Metric( + _relative_loc_taxa_cover, (:timesteps, :taxa, :sites, :scenarios) +) """ relative_juveniles(X::AbstractArray{T}, coral_spec::DataFrame)::AbstractArray{T} where {T<:Real} @@ -169,11 +177,14 @@ relative_loc_taxa_cover = Metric(_relative_loc_taxa_cover, (:timesteps, :taxa, : Juvenile coral cover relative to total site area. """ -function _relative_juveniles(X::AbstractArray{T}, coral_spec::DataFrame)::AbstractArray{T} where {T<:Real} +function _relative_juveniles( + X::AbstractArray{T}, coral_spec::DataFrame +)::AbstractArray{T} where {T<:Real} # Cover of juvenile corals (< 5cm diameter) - juv_groups = X[species=(coral_spec.class_id .== 1)] .+ X[species=(coral_spec.class_id .== 2)] + juv_groups = + X[species=(coral_spec.class_id .== 1)] .+ X[species=(coral_spec.class_id .== 2)] - return dropdims(sum(juv_groups, dims=:species), dims=:species) + return dropdims(sum(juv_groups; dims=:species); dims=:species) end function _relative_juveniles(rs::ResultSet)::AbstractArray return rs.outcomes[:relative_juveniles] @@ -186,7 +197,9 @@ relative_juveniles = Metric(_relative_juveniles, (:timesteps, :sites, :scenarios Juvenile coral cover in m². """ -function _absolute_juveniles(X::AbstractArray{T}, coral_spec::DataFrame, area::AbstractVector{T})::AbstractArray{T} where {T<:Real} +function _absolute_juveniles( + X::AbstractArray{T}, coral_spec::DataFrame, area::AbstractVector{T} +)::AbstractArray{T} where {T<:Real} return _relative_juveniles(X, coral_spec) .* area' end function _absolute_juveniles(rs::ResultSet)::AbstractArray @@ -200,11 +213,12 @@ absolute_juveniles = Metric(_absolute_juveniles, (:timesteps, :sites, :scenarios Calculate the maximum possible area that can be covered by juveniles for a given m². """ function _max_juvenile_area(coral_params::DataFrame, max_juv_density::Float64=51.8) - max_size_m² = maximum(colony_mean_area(coral_params[coral_params.class_id.==2, :mean_colony_diameter_m])) + max_size_m² = maximum( + colony_mean_area(coral_params[coral_params.class_id .== 2, :mean_colony_diameter_m]) + ) return max_juv_density * max_size_m² end - """ juvenile_indicator(X::AbstractArray{T}, coral_params::DataFrame, k_area::V)::AbstractArray{T} where {T<:Real,V<:Vector{Float64}} juvenile_indicator(rs::ResultSet) @@ -227,14 +241,14 @@ function _juvenile_indicator( # Replace 0 k areas with 1.0 to avoid zero-division error usable_k_area = Float64[k > 0.0 ? 
k : 1.0 for k in k_area]' - return _absolute_juveniles(X, coral_params, k_area) ./ (_max_juvenile_area(coral_params) .* usable_k_area) + return _absolute_juveniles(X, coral_params, k_area) ./ + (_max_juvenile_area(coral_params) .* usable_k_area) end function _juvenile_indicator(rs::ResultSet)::AbstractArray return rs.outcomes[:juvenile_indicator] end juvenile_indicator = Metric(_juvenile_indicator, (:timesteps, :sites, :scenarios)) - """ coral_evenness(r_taxa_cover::AbstractArray{T})::AbstractArray{T} where {T<:Real} coral_evenness(rs::ResultSet)::AbstractArray{T} where {T} @@ -254,10 +268,11 @@ function _coral_evenness(r_taxa_cover::AbstractArray{T})::AbstractArray{T} where # Sum across groups represents functional diversity # Group evenness (Hill 1973, Ecology 54:427-432) - loc_cover = dropdims(sum(r_taxa_cover, dims=2), dims=2) + loc_cover = dropdims(sum(r_taxa_cover; dims=2); dims=2) simpsons_diversity::YAXArray = ZeroDataCube((:timesteps, :sites), (n_steps, n_locs)) for loc in axes(loc_cover, 2) - simpsons_diversity[:, loc] = 1.0 ./ sum((r_taxa_cover[:, :, loc] ./ loc_cover[:, loc]) .^ 2, dims=2) + simpsons_diversity[:, loc] = + 1.0 ./ sum((r_taxa_cover[:, :, loc] ./ loc_cover[:, loc]) .^ 2; dims=2) end return replace!(simpsons_diversity, NaN => 0.0, Inf => 0.0) ./ n_grps @@ -296,7 +311,8 @@ function _colony_Lcm2_to_m3m2(inputs::YAXArray)::Tuple{Vector{Float64},Vector{Fl # Extract colony diameter (in cm) for each taxa/size class from scenario inputs # Have to be careful to extract data in the correct order, matching coral id - colony_mean_diams_cm::Vector{Float64} = (inputs[At(cs_p.coral_id .* "_mean_colony_diameter_m")] .* 100.0).data + colony_mean_diams_cm::Vector{Float64} = + (inputs[At(cs_p.coral_id .* "_mean_colony_diameter_m")] .* 100.0).data # Colony planar area parameters (see Fig 2B in Aston et al., [1]) # First column is `b`, second column is `a` @@ -304,12 +320,13 @@ function _colony_Lcm2_to_m3m2(inputs::YAXArray)::Tuple{Vector{Float64},Vector{Fl pa_params::Array{Float64,2} = planar_area_params() # Repeat each entry `n_sizes` times to cover the number size classes represented - pa_params = repeat(pa_params, inner=(n_sizes, 1)) + pa_params = repeat(pa_params; inner=(n_sizes, 1)) # Estimate colony volume (litres) based on relationship # established by Aston et al. 2022, for each taxa/size class and scenario # Aston et. al. 
log-log relationship so we apply `exp()` to transform back to dm³ - colony_litres_per_cm2::Vector{Float64} = exp.(pa_params[:, 1] .+ pa_params[:, 2] .* log.(colony_mean_diams_cm)) + colony_litres_per_cm2::Vector{Float64} = + exp.(pa_params[:, 1] .+ pa_params[:, 2] .* log.(colony_mean_diams_cm)) # Convert from dm^3 to m^3 cm2_to_m3_per_m2::Float64 = 10^-3 @@ -321,7 +338,6 @@ function _colony_Lcm2_to_m3m2(inputs::YAXArray)::Tuple{Vector{Float64},Vector{Fl return colony_vol_m3_per_m2, max_colony_vol_m3_per_m2 end - """ _shelter_species_loop(X::AbstractArray{T1,3}, n_species::Int64, colony_vol_m3_per_m2::Array{F}, max_colony_vol_m3_per_m2::Array{F}, k_area::Array{F})::YAXArray where {T1<:Real,F<:Float64} @@ -352,7 +368,7 @@ function _shelter_species_loop( MSV::Matrix{Float64} = k_area' .* max_colony_vol_m3_per_m2 # in m³ # Ensure zero division does not occur # ASV should be 0.0 where MSV is 0.0 so the end result is 0.0 / 1.0 - MSV[MSV.==0.0] .= 1.0 + MSV[MSV .== 0.0] .= 1.0 # Number of functional groups n_groups::Int64 = size(MSV, 1) @@ -360,22 +376,26 @@ function _shelter_species_loop( n_sizes::Int64 = Int64(n_group_and_size / n_groups) # Loop over each taxa group - RSV::YAXArray = ZeroDataCube((:timesteps, :species, :sites), size(X[species=1:n_groups])) - taxa_max_map = zip([i:(i+n_sizes-1) for i in 1:n_sizes:n_group_and_size], 1:n_groups) # map maximum SV for each group + RSV::YAXArray = ZeroDataCube( + (:timesteps, :species, :sites), size(X[species=1:n_groups]) + ) + taxa_max_map = zip( + [i:(i + n_sizes - 1) for i in 1:n_sizes:n_group_and_size], 1:n_groups + ) # map maximum SV for each group # Work out RSV for each taxa for (sp, sq) in taxa_max_map for site in 1:size(ASV, :sites) - RSV[species=At(sq), sites=At(site)] .= dropdims( - sum(ASV[species=At(sp), sites=At(site)], dims=:species), dims=:species - ) ./ MSV[sq, site] + RSV[species=At(sq), sites=At(site)] .= + dropdims( + sum(ASV[species=At(sp), sites=At(site)]; dims=:species); dims=:species + ) ./ MSV[sq, site] end end return RSV end - """ _shelter_species_loop!(X::YAXArray, ASV::YAXArray, nspecies::Int64, colony_vol_m3_per_m2::V, k_area::V) where {V<:AbstractVector{<:Float64}} @@ -436,7 +456,9 @@ function _absolute_shelter_volume( k_area::Vector{T}, inputs::DataFrameRow )::AbstractArray{T} where {T<:Real} - _inputs::YAXArray = DataCube(Matrix(Vector(inputs)'); scenarios=1:1, params=names(inputs)) + _inputs::YAXArray = DataCube( + Matrix(Vector(inputs)'); scenarios=1:1, params=names(inputs) + ) return _absolute_shelter_volume(X, k_area, _inputs) end function _absolute_shelter_volume( @@ -444,7 +466,9 @@ function _absolute_shelter_volume( k_area::Vector{T}, inputs::DataFrame )::AbstractArray{T} where {T<:Real} - _inputs::YAXArray = DataCube(Matrix(inputs); scenarios=1:size(inputs, 1), factors=names(inputs)) + _inputs::YAXArray = DataCube( + Matrix(inputs); scenarios=1:size(inputs, 1), factors=names(inputs) + ) return _absolute_shelter_volume(X, k_area, _inputs) end function _absolute_shelter_volume( @@ -461,7 +485,7 @@ function _absolute_shelter_volume( _shelter_species_loop!(X, ASV, nspecies, colony_vol, k_area) # Sum over groups and size classes to estimate total shelter volume per site - return dropdims(sum(ASV, dims=:species), dims=:species) + return dropdims(sum(ASV; dims=:species); dims=:species) end function _absolute_shelter_volume( X::YAXArray{T,4}, @@ -479,7 +503,7 @@ function _absolute_shelter_volume( end # Sum over groups and size classes to estimate total shelter volume per site - return dropdims(sum(ASV, dims=:species), 
dims=:species) + return dropdims(sum(ASV; dims=:species); dims=:species) end function _absolute_shelter_volume(rs::ResultSet)::AbstractArray return rs.outcomes[:absolute_shelter_volume] @@ -571,12 +595,14 @@ function _relative_shelter_volume( nspecies::Int64 = size(X, :species) # Calculate shelter volume of groups and size classes and multiply with covers - colony_vol::Array{Float64}, max_colony_vol::Array{Float64} = _colony_Lcm2_to_m3m2(inputs) + colony_vol::Array{Float64}, max_colony_vol::Array{Float64} = _colony_Lcm2_to_m3m2( + inputs + ) RSV::YAXArray = _shelter_species_loop(X, nspecies, colony_vol, max_colony_vol, k_area) # Sum over groups and size classes to estimate total shelter volume # proportional to the theoretical maximum (per site) - RSV = dropdims(sum(RSV, dims=:species), dims=:species) + RSV = dropdims(sum(RSV; dims=:species); dims=:species) clamp!(RSV, 0.0, 1.0) return RSV @@ -593,15 +619,19 @@ function _relative_shelter_volume( # Result template - six entries, one for each taxa n_groups::Int64 = length(coral_spec().taxa_names) - RSV::YAXArray = ZeroDataCube((:timesteps, :species, :sites, :scenarios), size(X[:, 1:n_groups, :, :])) + RSV::YAXArray = ZeroDataCube( + (:timesteps, :species, :sites, :scenarios), size(X[:, 1:n_groups, :, :]) + ) for scen::Int64 in 1:nscens colony_vol, max_colony_vol = _colony_Lcm2_to_m3m2(inputs[scen, :]) - RSV[scenarios=scen] .= _shelter_species_loop(X[scenarios=scen], nspecies, colony_vol, max_colony_vol, k_area) + RSV[scenarios=scen] .= _shelter_species_loop( + X[scenarios=scen], nspecies, colony_vol, max_colony_vol, k_area + ) end # Sum over groups and size classes to estimate total shelter volume # proportional to the theoretical maximum (per site) - RSV = dropdims(sum(RSV, dims=:species), dims=:species) + RSV = dropdims(sum(RSV; dims=:species); dims=:species) clamp!(RSV, 0.0, 1.0) return RSV @@ -611,7 +641,6 @@ function _relative_shelter_volume(rs::ResultSet)::YAXArray end relative_shelter_volume = Metric(_relative_shelter_volume, (:timesteps, :sites, :scenarios)) - include("pareto.jl") include("ranks.jl") include("reef_indices.jl") @@ -644,5 +673,4 @@ include("utils.jl") # return :(Metric(eval($name), $m.dims)) # end - end diff --git a/src/metrics/pareto.jl b/src/metrics/pareto.jl index 154d24834..5c37eb318 100644 --- a/src/metrics/pareto.jl +++ b/src/metrics/pareto.jl @@ -1,6 +1,5 @@ using StaticArrays - """ dominates(x::Vector{<:Real}, y::Vector{<:Real})::Vector @@ -19,7 +18,6 @@ function dominates(x::AbstractVector{<:Real}, y::AbstractVector{<:Real})::Bool return strict_inequality_found end - """ nds(X::AbstractArray{<:Real}, dist::Int64=0)::Vector{Vector{<:Int}} diff --git a/src/metrics/performance.jl b/src/metrics/performance.jl index fab83715b..cfae3cc72 100644 --- a/src/metrics/performance.jl +++ b/src/metrics/performance.jl @@ -3,7 +3,6 @@ module performance using Statistics, Distributions, DataFrames, StatsBase using ADRIA - """ normalize(vals::AbstractArray{<:Real}) @@ -21,7 +20,6 @@ end """Root Mean Square Error""" RMSE(obs, sim) = (sum((sim .- obs) .^ 2) / length(sim))^0.5 - """ probability(vals::AbstractArray{<:Real}) @@ -31,7 +29,6 @@ function probability(S::AbstractArray{<:Real}) return cdf.(fit(Normal, S), S) end - """ gmd(vals::AbstractVector{<:Real})::Float64 gmd(vals::AbstractMatrix{<:Real}) @@ -66,7 +63,6 @@ function gmd(vals::AbstractMatrix{<:Real}) return gmd.(eachcol(vals)) end - """ temporal_variability(x::AbstractVector{<:Real}) temporal_variability(x::AbstractArray{<:Real, 2}) @@ -104,7 +100,6 @@ function 
temporal_variability(x::AbstractArray{<:Real}, func_or_data...) return mean([map(f -> f isa Function ? f(x) : f, func_or_data)...]) end - """ intervention_effort(ms, inputs_i) @@ -131,21 +126,26 @@ function intervention_effort(ms::DataFrame, X::DataFrame; :SRM, :seed_years, :shade_years, - :fog_years, - ], + :fog_years + ] ) - - interv_s = ms[findall(in(interv_cols), Symbol.(ms.fieldname)), ["fieldname", "lower_bound", "upper_bound"]] + interv_s = ms[ + findall(in(interv_cols), Symbol.(ms.fieldname)), + ["fieldname", "lower_bound", "upper_bound"] + ] @assert nrow(interv_s) > 0 "No parameters for $(interv_cols) found." ub = interv_s[:, "upper_bound"] lb = interv_s[:, "lower_bound"] - return hcat([intervention_effort(values(X[:, interv_cols[i]]), ub[i], lb[i]) - for i in eachindex(interv_cols)]...) + return hcat( + [ + intervention_effort(values(X[:, interv_cols[i]]), ub[i], lb[i]) + for i in eachindex(interv_cols) + ]... + ) end - """ intervention_diversity(ms, inputs_i) @@ -162,7 +162,6 @@ function intervention_diversity(ms, inputs_i) return mean(gmd(intervention_effort(ms, inputs_i))) end - """ environmental_diversity(ms, inputs_i) @@ -178,7 +177,10 @@ This is referred to as \$E\$. """ function environmental_diversity(ms, inputs_i) env_cols = Symbol.(ADRIA.component_params(ms, ADRIA.EnvironmentalLayer).fieldname) - env_s = ms[findall(in(env_cols), Symbol.(ms.fieldname)), ["fieldname", "lower_bound", "upper_bound"]] + env_s = ms[ + findall(in(env_cols), Symbol.(ms.fieldname)), + ["fieldname", "lower_bound", "upper_bound"] + ] @assert nrow(env_s) > 0 "No parameters for $(env_cols) found." push!(env_s, ["RCP", 26, 85]) # Add lower/upper bound @@ -194,8 +196,7 @@ function environmental_diversity(ms, inputs_i) replace!(Et, NaN => 0.0) end - return mean(mean(Et, dims=1)) + return mean(mean(Et; dims=1)) end - end # module diff --git a/src/metrics/ranks.jl b/src/metrics/ranks.jl index d71f6a518..cc0514d27 100644 --- a/src/metrics/ranks.jl +++ b/src/metrics/ranks.jl @@ -35,7 +35,6 @@ function _collate_ranks(rs::ResultSet, selected; kwargs...)::YAXArray ) end - """ seed_ranks(rs::ResultSet; kwargs...) @@ -82,7 +81,7 @@ end Collates number of ranked locations. """ function _collate_ranked_locs(data::YAXArray)::Matrix{Int64} - locs = zeros(Int64, size.([data], (1,3))...) + locs = zeros(Int64, size.([data], (1, 3))...) 
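The `size.([data], (1, 3))` idiom above broadcasts `size` over the given dimension indices; wrapping `data` in a one-element vector pairs the same array with each index. A standalone sketch with an assumed toy shape:

    data = rand(10, 5, 4)                 # timesteps × sites × scenarios
    size.([data], (1, 3))                 # [10, 4]
    locs = zeros(Int64, size.([data], (1, 3))...)   # 10 × 4 matrix of zeros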
Threads.@threads for scen in axes(data, :scenarios) scen_ranks = data[:, :, scen] @@ -155,9 +154,11 @@ function top_n_seeded_sites(rs::ResultSet, n::Int64; kwargs...)::YAXArray r_ids = rs.site_data.reef_siteid min_rank = length(r_ids) + 1 - c_ranks = collect(dropdims(mean(ranked_locs, dims=1), dims=1)) + c_ranks = collect(dropdims(mean(ranked_locs; dims=1); dims=1)) - top_sites = Array{Union{String,Int32,Float32,Missing}}(undef, n, 3, size(ranked_locs, 3)) + top_sites = Array{Union{String,Int32,Float32,Missing}}( + undef, n, 3, size(ranked_locs, 3) + ) for scen in axes(ranked_locs, 3) flat = vec(c_ranks[:, scen]) flat[flat .== 0.0] .= min_rank @@ -230,12 +231,12 @@ function top_N_sites(rs::ResultSet, N::Int64; metric=relative_cover, stat=mean): return top_N_sites(metric(rs), N; stat=stat) end function top_N_sites(data::AbstractArray{<:Real}, N::Int64; stat=mean) - stat_m = dropdims(stat(data, dims=:timesteps), dims=:timesteps) + stat_m = dropdims(stat(data; dims=:timesteps); dims=:timesteps) top_N_sites = zeros(Int64, size(stat_m, :scenarios), N) for scen in axes(stat_m, :scenarios) # sort each scenario according to metric and get indexes - inds = sortperm(stat_m[:, scen], rev=true) + inds = sortperm(stat_m[:, scen]; rev=true) top_N_sites[scen, :] = inds[1:N] end diff --git a/src/metrics/reef_indices.jl b/src/metrics/reef_indices.jl index 4e1c4f3b0..3a77b6910 100644 --- a/src/metrics/reef_indices.jl +++ b/src/metrics/reef_indices.jl @@ -34,7 +34,8 @@ function _reef_condition_index( # Compare outputs against reef condition criteria provided by experts # These are median values for 8 experts. condition_category = [:lower, :very_poor, :poor, :fair, :good, :very_good, :very_good] - criteria = DataCube([ + criteria = DataCube( + [ 0.0 0.0 0.0 0.0 0.05 0.15 0.175 0.15 # Very Poor [0 to these values] 0.15 0.25 0.3 0.25 # Poor 0.25 0.5 0.5 0.5 # Fair 0.35 0.7 0.7 0.7 # Good 0.45 0.8 0.8 0.8 # Very Good and above 1.0 1.0 1.0 1.0 ]; condition=condition_category, metric=[:RC, :E, :SV, :Juv] ) index_metrics = zeros(size(rc)..., 4) @floop for (idx, met) in enumerate([rc, evenness, sv, juves]) - lower = collect(criteria[1:end-1, idx]) + lower = collect(criteria[1:(end - 1), idx]) upper = collect(criteria[2:end, idx]) - met_cp = map(x -> criteria[2:end, idx][lower.<=x.<upper][1], met.data) + met_cp = map(x -> criteria[2:end, idx][lower .<= x .< upper][1], met.data) replace!(met_cp, Inf => 0.9) index_metrics[:, :, :, idx] .= met_cp end @@ -63,16 +64,19 @@ function _reef_condition_index( axes(index_metrics, 2), axes(index_metrics, 3)) - # Find the category name condition score relates to for each metric - rci_cats = [condition_category[argmax(index_metrics[ts, loc, scen, i] .== criteria[:, i])] - for i in axes(index_metrics, 4)] + rci_cats = [ + condition_category[argmax(index_metrics[ts, loc, scen, i] .== criteria[:, i])] + for i in axes(index_metrics, 4) + ] c = counter(rci_cats) # Get the corresponding score values by index (based on RC only) # Because the index is used and not the score value (0.15, 0.25, etc), it will # always align with the correct category ("poor", "good", etc) - scores = dropdims(criteria[condition=At(collect(keys(c))), metric=At([:RC])], dims=2) + scores = dropdims( + criteria[condition=At(collect(keys(c))), metric=At([:RC])]; dims=2 + ) # RCI is assigned the minimum score of the greatest number of metrics that meet # `threshold`. # the score is "poor".
# If scores are spread out (e.g., unique values), we return the 2nd lowest score if any(values(c) .>= threshold) - rci[ts, loc, scen] = minimum(scores[values(c).>=threshold]) + rci[ts, loc, scen] = minimum(scores[values(c) .>= threshold]) else rci[ts, loc, scen] = sort(scores)[2] end @@ -152,11 +156,14 @@ function _reef_tourism_index( # TODO: Ryan to reinterpolate to account for no CoTS and no rubble # Apply unique intercepts for each scenario - rti = cat(map(axe -> (intcp[axe] .+ (0.12764 .* rc[:, :, axe]) .+ - (0.31946 .* evenness[:, :, axe]) .+ - (0.11676 .* sv[:, :, axe]) .+ - (-0.0036065 .* juves[:, :, axe]) - ), axes(rc, 3))..., dims=3) + rti = cat( + map( + axe -> ( + intcp[axe] .+ (0.12764 .* rc[:, :, axe]) .+ + (0.31946 .* evenness[:, :, axe]) .+ + (0.11676 .* sv[:, :, axe]) .+ + (-0.0036065 .* juves[:, :, axe]) + ), axes(rc, 3))...; dims=3) return round.(clamp.(rti, 0.1, 0.9), digits=2) end @@ -223,11 +230,15 @@ function _reef_fish_index(rc::AbstractArray, intcp_u1, intcp_u2) slope2 = 1883.3 # Apply unique intercepts for each scenario - rfi = cat(map(axe -> 0.01 .* ( - intcp2[axe] .+ slope2 .* - (intcp1[axe] .+ slope1 .* (rc[:, :, axe] .* 100.0)) - ), - axes(rc, 3))..., + rfi = cat( + map( + axe -> + 0.01 .* ( + intcp2[axe] .+ + slope2 .* + (intcp1[axe] .+ slope1 .* (rc[:, :, axe] .* 100.0)) + ), + axes(rc, 3))...; dims=3) # Calculate total fish biomass, kg km2 @@ -244,8 +255,6 @@ function _reef_fish_index(rs::ResultSet; intcp_u1::Bool=false, intcp_u2::Bool=fa end reef_fish_index = Metric(_reef_fish_index, (:timesteps, :sites, :scenarios)) - - """ scenario_rfi(rfi::YAXArray; kwargs...) scenario_rfi(rs::ResultSet; kwargs...) diff --git a/src/metrics/scenario.jl b/src/metrics/scenario.jl index 8dabd7c14..9a2caf7ad 100644 --- a/src/metrics/scenario.jl +++ b/src/metrics/scenario.jl @@ -22,9 +22,10 @@ Matrix[timesteps ⋅ scenarios] function scenario_trajectory(data::AbstractArray; metric=mean)::YAXArray{<:Real} tf_labels = axis_labels(data, :timesteps) - s::Matrix{eltype(data)} = metric.( - JuliennedArrays.Slices(data[timesteps=At(tf_labels)], axis_index(data, :sites)) - ) + s::Matrix{eltype(data)} = + metric.( + JuliennedArrays.Slices(data[timesteps=At(tf_labels)], axis_index(data, :sites)) + ) return DataCube(s; timesteps=tf_labels, scenarios=1:size(s, 2)) end @@ -35,11 +36,11 @@ end Calculate the mean absolute coral for each scenario for the entire domain. """ function _scenario_total_cover(X::AbstractArray; kwargs...)::AbstractArray{<:Real} - return dropdims(sum(slice_results(X; kwargs...), dims=:sites), dims=:sites) + return dropdims(sum(slice_results(X; kwargs...); dims=:sites); dims=:sites) end function _scenario_total_cover(rs::ResultSet; kwargs...)::AbstractArray{<:Real} tac = total_absolute_cover(rs) - return dropdims(sum(slice_results(tac; kwargs...), dims=:sites), dims=:sites) + return dropdims(sum(slice_results(tac; kwargs...); dims=:sites); dims=:sites) end scenario_total_cover = Metric(_scenario_total_cover, (:timesteps, :scenarios), "m²") @@ -69,12 +70,12 @@ function _scenario_relative_juveniles( kwargs... )::AbstractArray{<:Real} ajuv = call_metric(absolute_juveniles, data, coral_spec; kwargs...) 
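A standalone sketch of the domain-wide normalisation used by `_scenario_relative_juveniles` above, with made-up numbers: location-level absolute juvenile areas are pooled, then divided by the summed habitable area.

    aj = [120.0 80.0; 100.0 60.0]   # timesteps × locations: absolute juvenile cover (m²)
    k_area = [2000.0, 1000.0]       # habitable area per location (m²)
    rel_juv = vec(sum(aj; dims=2)) ./ sum(k_area)   # [200, 160] ./ 3000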
- return dropdims(sum(ajuv, dims=:sites), dims=:sites) / sum(k_area) + return dropdims(sum(ajuv; dims=:sites); dims=:sites) / sum(k_area) end function _scenario_relative_juveniles(rs::ResultSet; kwargs...)::YAXArray # Calculate relative domain-wide cover based on absolute values aj = absolute_juveniles(rs) - return dropdims(sum(aj, dims=:sites), dims=:sites) ./ sum(site_k_area(rs)) + return dropdims(sum(aj; dims=:sites); dims=:sites) ./ sum(site_k_area(rs)) end scenario_relative_juveniles = Metric(_scenario_relative_juveniles, (:timesteps, :scenarios)) @@ -91,11 +92,11 @@ function _scenario_absolute_juveniles( kwargs... )::AbstractArray{<:Real} juv = call_metric(absolute_juveniles, data, coral_spec; kwargs...) - return dropdims(sum(juv, dims=:sites), dims=:sites) / sum(k_area) + return dropdims(sum(juv; dims=:sites); dims=:sites) / sum(k_area) end function _scenario_absolute_juveniles(rs::ResultSet; kwargs...)::AbstractArray{<:Real} # Calculate relative domain-wide cover based on absolute values - return dropdims(sum(absolute_juveniles(rs), dims=:sites), dims=:sites) + return dropdims(sum(absolute_juveniles(rs); dims=:sites); dims=:sites) end scenario_absolute_juveniles = Metric(_scenario_absolute_juveniles, (:timesteps, :scenarios)) @@ -112,10 +113,10 @@ function _scenario_juvenile_indicator( kwargs... )::AbstractArray{<:Real} juv = call_metric(juvenile_indicator, data, coral_spec, k_area; kwargs...) - return dropdims(mean(juv, dims=:sites), dims=:sites) / sum(k_area) + return dropdims(mean(juv; dims=:sites); dims=:sites) / sum(k_area) end function _scenario_juvenile_indicator(rs::ResultSet; kwargs...)::AbstractArray{<:Real} - return dropdims(mean(juvenile_indicator(rs), dims=:sites), dims=:sites) + return dropdims(mean(juvenile_indicator(rs); dims=:sites); dims=:sites) end scenario_juvenile_indicator = Metric(_scenario_juvenile_indicator, (:timesteps, :scenarios)) @@ -127,7 +128,7 @@ Calculate the mean absolute shelter volumes for each scenario for the entire dom """ function _scenario_asv(sv::YAXArray; kwargs...)::AbstractArray{<:Real} sv_sliced = slice_results(sv; kwargs...) - return dropdims(sum(sv_sliced, dims=:sites), dims=:sites) + return dropdims(sum(sv_sliced; dims=:sites); dims=:sites) end function _scenario_asv(rs::ResultSet; kwargs...)::AbstractArray{<:Real} return _scenario_asv(rs.outcomes[:absolute_shelter_volume]; kwargs...) @@ -142,7 +143,7 @@ Calculate the mean relative shelter volumes for each scenario for the entire dom """ function _scenario_rsv(sv::YAXArray; kwargs...)::AbstractArray{<:Real} sv_sliced = slice_results(sv; kwargs...) - return dropdims(mean(sv_sliced, dims=:sites), dims=:sites) + return dropdims(mean(sv_sliced; dims=:sites); dims=:sites) end function _scenario_rsv(rs::ResultSet; kwargs...)::AbstractArray{<:Real} return _scenario_rsv(rs.outcomes[:relative_shelter_volume]; kwargs...) 
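The scenario metrics above repeatedly collapse the spatial axis with `dropdims(sum(...; dims=:sites); dims=:sites)`; `dropdims` is needed because reductions keep the reduced axis as a singleton. A standalone sketch with a plain array of assumed shape:

    Y = rand(10, 5, 4)                        # timesteps × sites × scenarios
    size(sum(Y; dims=2))                      # (10, 1, 4): reduced axis kept as singleton
    size(dropdims(sum(Y; dims=2); dims=2))    # (10, 4): one trajectory per scenario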
@@ -195,7 +196,7 @@ function scenario_outcomes(rs::ResultSet, metrics::Vector{<:Metric})::YAXArray scen_outcomes = ZeroDataCube(; timesteps=timesteps(rs), scenarios=1:n_scenarios, - outcomes=to_symbol.(metrics), + outcomes=to_symbol.(metrics) ) for (i, metric) in enumerate(metrics) diff --git a/src/metrics/site_level.jl b/src/metrics/site_level.jl index 06912ba6c..11511019f 100644 --- a/src/metrics/site_level.jl +++ b/src/metrics/site_level.jl @@ -103,7 +103,7 @@ function summarize( data::YAXArray{D,T,N,A}, alongs_axis::Vector{Symbol}, metric::Function, - timesteps::Union{UnitRange,Vector{Int64},BitVector}, + timesteps::Union{UnitRange,Vector{Int64},BitVector} )::YAXArray where {D,T,N,A} return summarize(data[timesteps=timesteps], alongs_axis, metric) end diff --git a/src/metrics/temporal.jl b/src/metrics/temporal.jl index ae3c91760..22d5ac2f1 100644 --- a/src/metrics/temporal.jl +++ b/src/metrics/temporal.jl @@ -5,7 +5,6 @@ Provides summary statistics across selected scenarios. import Interpolations: GriddedInterpolation - function summarize_trajectory(data::YAXArray)::Dict{Symbol,AbstractArray{<:Real}} squash = nothing if :sites in axes_names(data) @@ -15,8 +14,10 @@ function summarize_trajectory(data::YAXArray)::Dict{Symbol,AbstractArray{<:Real} end if !isnothing(squash) - summarized::Dict{Symbol,AbstractArray{<:Real}} = Dict(Symbol(f) => collect(dropdims(f(data, dims=squash), dims=squash)) - for f in [mean, median, std, minimum, maximum]) + summarized::Dict{Symbol,AbstractArray{<:Real}} = Dict( + Symbol(f) => collect(dropdims(f(data; dims=squash); dims=squash)) + for f in [mean, median, std, minimum, maximum] + ) else # Only a single scenario so don't bother doing anything summarized = Dict(Symbol(f) => data for f in [mean, median, minimum, maximum]) @@ -39,7 +40,6 @@ function summarize_trajectory(data::YAXArray)::Dict{Symbol,AbstractArray{<:Real} return summarized end - """ summarize_raw(data::YAXArray; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} @@ -53,7 +53,6 @@ function summarize_raw(rs::ResultSet; kwargs...)::Dict{Symbol,AbstractArray{<:Re return summarize_raw(rs.raw; kwargs...) end - # function summarize_rci(rs::ResultSet; kwargs...) # rc::AbstractArray{<:Real} = call_metric(relative_cover, rs.inputs; kwargs...) @@ -72,92 +71,104 @@ end # return summarize_trajectory(rci) # end - """ summarize_total_cover(raw::YAXArray, areas::AbstractArray{<:Real}; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} summarize_total_cover(rs::ResultSet; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} Calculate summarized total absolute cover. """ -function summarize_total_cover(raw::YAXArray, areas::AbstractArray{<:Real}; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} +function summarize_total_cover( + raw::YAXArray, areas::AbstractArray{<:Real}; kwargs... +)::Dict{Symbol,AbstractArray{<:Real}} sites = haskey(kwargs, :sites) ? kwargs[:sites] : (:) tac = call_metric(total_absolute_cover, raw, areas[sites]; kwargs...) 
- tac = dropdims(sum(tac, dims=:sites), dims=:sites) + tac = dropdims(sum(tac; dims=:sites); dims=:sites) return summarize_trajectory(tac) end function summarize_total_cover(rs::ResultSet; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} - tac = dropdims(sum(slice_results(_total_absolute_cover(rs); kwargs...), dims=:sites), dims=:sites) + tac = dropdims( + sum(slice_results(_total_absolute_cover(rs); kwargs...); dims=:sites); dims=:sites + ) return summarize_trajectory(tac) end - """ summarize_relative_cover(rc::YAXArray; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} summarize_relative_cover(rs::ResultSet, kwargs...)::Dict{Symbol,AbstractArray{<:Real}} Calculate summarized relative cover. """ -function summarize_relative_cover(rc::YAXArray; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} +function summarize_relative_cover( + rc::YAXArray; kwargs... +)::Dict{Symbol,AbstractArray{<:Real}} rc_sliced = slice_results(rc; kwargs...) return summarize_trajectory(rc_sliced) end -function summarize_relative_cover(rs::ResultSet; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} +function summarize_relative_cover( + rs::ResultSet; kwargs... +)::Dict{Symbol,AbstractArray{<:Real}} return summarize_relative_cover(rs.outcomes[:relative_cover]; kwargs...) end - """ summarize_coral_evenness(raw::YAXArray; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} summarize_coral_evenness(rs::ResultSet, kwargs...)::Dict{Symbol,AbstractArray{<:Real}} Calculate summarized coral evenness. """ -function summarize_coral_evenness(raw::YAXArray; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} +function summarize_coral_evenness( + raw::YAXArray; kwargs... +)::Dict{Symbol,AbstractArray{<:Real}} ce::AbstractArray{<:Real} = call_metric(coral_evenness, raw; kwargs...) return summarize_trajectory(ce) end -function summarize_coral_evenness(rs::ResultSet; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} +function summarize_coral_evenness( + rs::ResultSet; kwargs... +)::Dict{Symbol,AbstractArray{<:Real}} return summarize_coral_evenness(rs.raw; kwargs...) end - """ summarize_absolute_shelter_volume(sv::YAXArray; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} summarize_absolute_shelter_volume(rs::ResultSet, kwargs...)::Dict{Symbol,AbstractArray{<:Real}} Calculate summarized coral evenness. """ -function summarize_absolute_shelter_volume(sv::YAXArray; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} +function summarize_absolute_shelter_volume( + sv::YAXArray; kwargs... +)::Dict{Symbol,AbstractArray{<:Real}} return summarize_trajectory(slice_results(sv; kwargs...)) end -function summarize_absolute_shelter_volume(rs::ResultSet; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} +function summarize_absolute_shelter_volume( + rs::ResultSet; kwargs... +)::Dict{Symbol,AbstractArray{<:Real}} sv_sliced = slice_results(rs.outcomes[:absolute_shelter_volume]; kwargs...) return summarize_trajectory(sv_sliced) end - """ summarize_relative_shelter_volume(sv::YAXArray; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} summarize_relative_shelter_volume(rs::ResultSet, kwargs...)::Dict{Symbol,AbstractArray{<:Real}} Calculate summarized coral evenness. """ -function summarize_relative_shelter_volume(sv::YAXArray; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} +function summarize_relative_shelter_volume( + sv::YAXArray; kwargs... 
+)::Dict{Symbol,AbstractArray{<:Real}} return summarize_trajectory(slice_results(sv; kwargs...)) end -function summarize_relative_shelter_volume(rs::ResultSet; kwargs...)::Dict{Symbol,AbstractArray{<:Real}} +function summarize_relative_shelter_volume( + rs::ResultSet; kwargs... +)::Dict{Symbol,AbstractArray{<:Real}} sv_sliced = slice_results(rs.outcomes[:relative_shelter_volume]; kwargs...) return summarize_trajectory(sv_sliced) end - - function summarize_trajectories(rs::ResultSet, ts_name; kwargs...) sliced = slice_results(rs.outcomes[ts_name]; kwargs...) return summarize_trajectory(sliced) end - """ trajectory_heatmap(data::YAXArray)::HeatMap @@ -176,7 +187,6 @@ function trajectory_heatmap(data::YAXArray)::HeatMap return o end - """ trajectory_heatmap_data(data::YAXArray)::Tuple{Vector{Float64},Vector{Float64},Matrix{Int64}} @@ -188,7 +198,9 @@ Estimate heatmap of trajectories from a 2D dataset. # Returns Tuple of xedges, yedges, and bi-dimensional histogram matrix """ -function trajectory_heatmap_data(data::YAXArray)::Tuple{Vector{Float64},Vector{Float64},Matrix{Int64}} +function trajectory_heatmap_data( + data::YAXArray +)::Tuple{Vector{Float64},Vector{Float64},Matrix{Int64}} o::HeatMap = trajectory_heatmap(data) return collect(o.xedges), collect(o.yedges), o.counts diff --git a/src/metrics/utils.jl b/src/metrics/utils.jl index e3f1acf04..ccde9fdd6 100644 --- a/src/metrics/utils.jl +++ b/src/metrics/utils.jl @@ -52,7 +52,6 @@ function dims(m::Metric)::Tuple return m.dims end - """ ndims(m::Metric)::Int64 @@ -62,7 +61,6 @@ function Base.ndims(m::Metric)::Int64 return length(dims(m)) end - """ call_metric(metric::Union{Function,Metric}, data::YAXArray, args...; kwargs...) @@ -83,13 +81,14 @@ function call_metric(metric::Union{Function,Metric}, data::YAXArray, args...; kw end end - """ slice_results(data::YAXArray; timesteps=(:), species=(:), sites=(:), scenarios=(:)) Slice data as indicated. Dimensions not found in target data are ignored. 
""" -function slice_results(data::YAXArray; timesteps=(:), species=(:), sites=(:), scenarios=(:))::YAXArray +function slice_results( + data::YAXArray; timesteps=(:), species=(:), sites=(:), scenarios=(:) +)::YAXArray f_dims = (timesteps=timesteps, species=species, sites=sites, scenarios=scenarios) s_names = keys(f_dims) diff --git a/src/scenario.jl b/src/scenario.jl index bd0517c7f..89aedb533 100644 --- a/src/scenario.jl +++ b/src/scenario.jl @@ -45,7 +45,7 @@ function setup_cache(domain::Domain)::NamedTuple site_area=Matrix{Float64}(site_area(domain)'), # area of locations site_k_area=Matrix{Float64}(site_k_area(domain)'), # location carrying capacity wave_damage=zeros(tf, n_group_and_size, n_locs), # damage coefficient for each size class - dhw_tol_mean_log=zeros(tf, n_group_and_size, n_locs), # tmp log for mean dhw tolerances + dhw_tol_mean_log=zeros(tf, n_group_and_size, n_locs) # tmp log for mean dhw tolerances ) return cache @@ -75,7 +75,9 @@ end Reshape vector to shape [functional_groups ⋅ sizes] """ -function _to_group_size(growth_spec::CoralGrowth, data::AbstractVector{T})::Matrix{T} where {T<:Union{Float32,Float64,Bool}} +function _to_group_size( + growth_spec::CoralGrowth, data::AbstractVector{T} +)::Matrix{T} where {T<:Union{Float32,Float64,Bool}} # Data is reshaped to size ⋅ groups then transposed to maintain expected order return Matrix(reshape(data, (growth_spec.n_sizes, growth_spec.n_groups))') end @@ -118,7 +120,7 @@ function run_scenarios( scens::DataFrame, RCP::Vector{String}; show_progress=true, - remove_workers=true, + remove_workers=true )::ResultSet # Initialize ADRIA configuration options setup() @@ -156,15 +158,18 @@ function run_scenarios( _bin_edges::Matrix{Float64} = bin_edges() functional_groups = [ FunctionalGroup.( - eachrow(_bin_edges[:, 1:end-1]), + eachrow(_bin_edges[:, 1:(end - 1)]), eachrow(_bin_edges[:, 2:end]), eachrow(zeros(n_groups, n_sizes)) ) for _ in 1:n_locs ] - para_threshold = ((typeof(dom) == RMEDomain) || (typeof(dom) == ReefModDomain)) ? 8 : 256 + para_threshold = + ((typeof(dom) == RMEDomain) || (typeof(dom) == ReefModDomain)) ? 8 : 256 active_cores::Int64 = parse(Int64, ENV["ADRIA_NUM_CORES"]) - parallel = (parse(Bool, ENV["ADRIA_DEBUG"]) == false) && (active_cores > 1) && (nrow(scens) >= para_threshold) + parallel = + (parse(Bool, ENV["ADRIA_DEBUG"]) == false) && (active_cores > 1) && + (nrow(scens) >= para_threshold) if parallel && nworkers() == 1 @info "Setting up parallel processing..." 
spinup_time = @elapsed begin
@@ -198,7 +203,9 @@ function run_scenarios(
         # Switch RCPs so correct data is loaded
         dom = switch_RCPs!(dom, rcp)
-        target_rows = findall(scenarios_matrix[factors=At("RCP")] .== parse(Float64, rcp))
+        target_rows = findall(
+            scenarios_matrix[factors=At("RCP")] .== parse(Float64, rcp)
+        )
         scen_args = _scenario_args(dom, scenarios_matrix, rcp, length(target_rows))
 
         if show_progress
@@ -223,7 +230,9 @@
             # Switch RCPs so correct data is loaded
             dom = switch_RCPs!(dom, rcp)
-            scen_args = _scenario_args(dom, scenarios_matrix, rcp, size(scenarios_matrix, 1))
+            scen_args = _scenario_args(
+                dom, scenarios_matrix, rcp, size(scenarios_matrix, 1)
+            )
 
             if show_progress
                 @showprogress desc = run_msg dt = 4 map(func, scen_args)
@@ -269,7 +278,7 @@ function run_scenario(
     idx::Int64,
     scenario::Union{AbstractVector,DataFrameRow},
     functional_groups::Vector{Vector{FunctionalGroup}}, # additional argument for reusable buffer
-    data_store::NamedTuple,
+    data_store::NamedTuple
 )::Nothing
     if domain.RCP == ""
         local rcp
@@ -294,33 +303,35 @@ function run_scenario(
     rs_raw::Array{Float64} = result_set.raw
     vals = relative_cover(rs_raw)
-    a_adapt[a_adapt.>0.0] .+= _to_group_size(
+    a_adapt[a_adapt .> 0.0] .+= _to_group_size(
         domain.coral_growth,
         corals.dist_mean
-    )[a_adapt.>0.0]
+    )[a_adapt .> 0.0]
 
     # Pre-calculate proportion of survivors from wave stress
     # Sw_t = wave_damage!(cache.wave_damage, wave_scen, corals.wavemort90, n_species)
@@ -651,7 +676,7 @@ function run_model(
     # basal_area_per_settler is the area in m^2 of a size class one coral
     basal_area_per_settler = colony_mean_area(
-        corals.mean_colony_diameter_m[corals.class_id.==1]
+        corals.mean_colony_diameter_m[corals.class_id .== 1]
     )
 
     # Dummy vars to fill/replace with ranks of selected locations
@@ -685,18 +710,20 @@ function run_model(
     FLoops.assistant(false)
     for tstep::Int64 in 2:tf
         # Convert cover to absolute values to use within CoralBlox model
-        C_cover_t[:, :, habitable_locs] .= C_cover[tstep-1, :, :, habitable_locs] .* habitable_loc_areas′
+        C_cover_t[:, :, habitable_locs] .=
+            C_cover[tstep - 1, :, :, habitable_locs] .* habitable_loc_areas′
 
         lin_ext_scale_factors::Vector{Float64} = linear_extension_scale_factors(
             C_cover_t[:, :, habitable_locs],
             habitable_loc_areas,
             _linear_extensions,
             _bin_edges,
-            habitable_max_projected_cover,
+            habitable_max_projected_cover
        )
        # ? Should we bring this inside CoralBlox?
- lin_ext_scale_factors[_loc_coral_cover(C_cover_t)[habitable_locs].<(0.7 .* habitable_loc_areas)] .= 1 + lin_ext_scale_factors[_loc_coral_cover(C_cover_t)[habitable_locs] .< (0.7 .* habitable_loc_areas)] .= + 1 @floop for i in findall(habitable_locs) # TODO Skip when _loc_rel_leftover_space[i] == 0 @@ -714,16 +741,19 @@ function run_model( end # Check if size classes are inappropriately out-growing habitable area - @assert (sum(_loc_coral_cover(C_cover_t)[habitable_locs] .> habitable_loc_areas) == 0) "Cover outgrowing habitable area" + @assert ( + sum(_loc_coral_cover(C_cover_t)[habitable_locs] .> habitable_loc_areas) == 0 + ) "Cover outgrowing habitable area" # Convert C_cover_t to relative values after CoralBlox was run - C_cover_t[:, :, habitable_locs] .= C_cover_t[:, :, habitable_locs] ./ habitable_loc_areas′ + C_cover_t[:, :, habitable_locs] .= + C_cover_t[:, :, habitable_locs] ./ habitable_loc_areas′ C_cover[tstep, :, :, habitable_locs] .= C_cover_t[:, :, habitable_locs] # Natural adaptation (doesn't change C_cover_t) if tstep <= tf adjust_DHW_distribution!( - @view(C_cover[(tstep-1), :, :, :]), c_mean_t, p.r + @view(C_cover[(tstep - 1), :, :, :]), c_mean_t, p.r ) # Set values for t to t-1 @@ -756,7 +786,7 @@ function run_model( sim_params.max_settler_density, sim_params.max_larval_density, basal_area_per_settler, - potential_settlers, + potential_settlers )[ :, habitable_locs ] ./ loc_k_area[:, habitable_locs] @@ -768,7 +798,7 @@ function run_model( TP_data, # ! IMPORTANT: Pass in transition probability matrix, not connectivity! recruitment, fec_params_per_m², - param_set[At("heritability")], + param_set[At("heritability")] ) # Determine intervention locations whose deployment is assumed to occur @@ -807,7 +837,8 @@ function run_model( ) if !isempty(selected_fog_ranks) - log_location_ranks[tstep, At(selected_fog_ranks), At(:fog)] .= 1:length(selected_fog_ranks) + log_location_ranks[tstep, At(selected_fog_ranks), At(:fog)] .= + 1:length(selected_fog_ranks) end end elseif apply_fogging && fog_decision_years[tstep] @@ -844,7 +875,9 @@ function run_model( # Determine connectivity strength weighting by area. # Accounts for strength of connectivity where there is low/no coral cover - in_conn, out_conn, _ = connectivity_strength(area_weighted_conn, vec(loc_coral_cover), conn_cache) + in_conn, out_conn, _ = connectivity_strength( + area_weighted_conn, vec(loc_coral_cover), conn_cache + ) update_criteria_values!( decision_mat; @@ -869,7 +902,8 @@ function run_model( # Log rankings as appropriate if !isempty(selected_seed_ranks) - log_location_ranks[tstep, At(selected_seed_ranks), At(:seed)] .= 1:length(selected_seed_ranks) + log_location_ranks[tstep, At(selected_seed_ranks), At(:seed)] .= + 1:length(selected_seed_ranks) end elseif apply_seeding && seed_decision_years[tstep] # Unguided deployment, seed/fog corals anywhere, so long as available space > 0 @@ -902,7 +936,7 @@ function run_model( a_adapt, @view(Yseed[tstep, :, :]), c_std, - c_mean_t, + c_mean_t ) # Add coral seeding to recruitment @@ -929,7 +963,7 @@ function run_model( c_std, c_mean_t_1, c_mean_t, - @view(bleaching_mort[(tstep-1):tstep, :, :, :]) + @view(bleaching_mort[(tstep - 1):tstep, :, :, :]) ) # Coral deaths due to selected cyclone scenario @@ -938,7 +972,7 @@ function run_model( cyclone_mortality!(C_cover_t, cyclone_mortality_scen[tstep, :, :]') # Calculate survival_rate due to env. 
disturbances
-        ΔC_cover_t[ΔC_cover_t.==0.0] .= 1.0
+        ΔC_cover_t[ΔC_cover_t .== 0.0] .= 1.0
         survival_rate_cache .= C_cover_t ./ ΔC_cover_t
         @assert sum(survival_rate_cache .> 1) == 0 "Survival rate should be <= 1"
@@ -980,7 +1014,7 @@ function run_model(
         shade_log=Yshade,
         site_ranks=log_location_ranks,
         bleaching_mortality=bleaching_mort,
-        coral_dhw_log=collated_dhw_tol_log,
+        coral_dhw_log=collated_dhw_tol_log
     )
 end
 
@@ -1003,7 +1037,9 @@ function cyclone_mortality!(coral_cover, coral_params, cyclone_mortality)::Nothi
     coral_cover[coral_params.small, :] -= coral_deaths_small
 
     # Mid class coral mortality
-    coral_mid = hcat(collect(Iterators.partition(coral_params.mid, length(coral_params.small)))...)
+    coral_mid = hcat(
+        collect(Iterators.partition(coral_params.mid, length(coral_params.small)))...
+    )
     for i in 1:size(coral_mid, 1)
         coral_deaths_mid = coral_cover[coral_mid[i, :], :] .* cyclone_mortality
         coral_cover[coral_mid[i, :], :] -= coral_deaths_mid
@@ -1027,4 +1063,5 @@ function cyclone_mortality!(
     return nothing
 end
 
-_loc_coral_cover(C_cover_t::Array{Float64,3}) = dropdims(sum(C_cover_t; dims=(1, 2)), dims=(1, 2))
+_loc_coral_cover(C_cover_t::Array{Float64,3}) =
+    dropdims(sum(C_cover_t; dims=(1, 2)); dims=(1, 2))
diff --git a/src/spatial/spatial.jl b/src/spatial/spatial.jl
index 0c69bd101..11716dab0 100644
--- a/src/spatial/spatial.jl
+++ b/src/spatial/spatial.jl
@@ -1,6 +1,5 @@
 """Functions to interact with spatial datasets."""
 
-
 """
     _get_geom_col(gdf::DataFrame)::Union{Symbol, Bool}
diff --git a/src/utils/scale.jl b/src/utils/scale.jl
index 302f5238b..a74eb0f27 100644
--- a/src/utils/scale.jl
+++ b/src/utils/scale.jl
@@ -4,7 +4,7 @@ end
 function linear_scale(from_unit::Symbol, to_unit::Symbol)::Float64
     resulting_power = unit_power(from_unit) - unit_power(to_unit)
-    return round(10.0^resulting_power, digits=abs(resulting_power))
+    return round(10.0^resulting_power; digits=abs(resulting_power))
 end
 function linear_scale(number::Real, from_unit::Symbol, to_unit::Symbol)::Float64
     return number * linear_scale(from_unit, to_unit)
@@ -12,7 +12,7 @@ end
 function quadratic_scale(from_unit::Symbol, to_unit::Symbol)::Float64
     resulting_power = unit_power(from_unit) * 2 - unit_power(to_unit) * 2
-    return round(10.0^resulting_power, digits=abs(resulting_power))
+    return round(10.0^resulting_power; digits=abs(resulting_power))
 end
 function quadratic_scale(number::Real, from_unit::Symbol, to_unit::Symbol)::Float64
     return number * quadratic_scale(from_unit, to_unit)
diff --git a/src/utils/setup.jl b/src/utils/setup.jl
index cb5c3185f..a59b1aa48 100644
--- a/src/utils/setup.jl
+++ b/src/utils/setup.jl
@@ -15,7 +15,8 @@ function setup()::Nothing
         ENV["ADRIA_OUTPUT_DIR"] = config["results"]["output_dir"]
         ENV["ADRIA_NUM_CORES"] = config["operation"]["num_cores"]
         ENV["ADRIA_THRESHOLD"] = config["operation"]["threshold"]
-        ENV["ADRIA_DEBUG"] = haskey(config["operation"], "debug") ? config["operation"]["debug"] : false
+        ENV["ADRIA_DEBUG"] =
+            haskey(config["operation"], "debug") ? config["operation"]["debug"] : false
     catch
         @warn "Could not find config.toml file.\nApplying default configuration and saving results to 'Outputs' in current directory."
@@ -29,7 +30,6 @@ function setup()::Nothing return nothing end - """Check to ensure setup has been run.""" function has_setup()::Bool try diff --git a/test/Ecosystem.jl b/test/Ecosystem.jl index 0a2765544..eb19e28bd 100644 --- a/test/Ecosystem.jl +++ b/test/Ecosystem.jl @@ -21,16 +21,16 @@ using ADRIA.Random @test all(mean_diffs .< 1e-10) || "calculated truncated normal with symmetric bounds mean not equal to normal mean" - + mean_diffs = zeros(n_checks) - + # The calculated truncated normal mean should agree with the Distributions.jl implementation for i in 1:n_checks mu = rand(Uniform(0, 10)) stdev = rand(Uniform(0.01, 10)) lb = rand(Uniform(mu - 6.0 * stdev, mu + 3.0 * stdev)) ub = rand(Uniform(lb, lb + 3.0 * stdev)) - + calculated = ADRIA.truncated_normal_mean( mu, stdev, lb, ub ) @@ -56,15 +56,21 @@ end lb = mu - n_std * stdev ub = mu + n_std * stdev - cdf_diffs[3 * (i - 1) + 1] = abs(0.0 - ADRIA.truncated_normal_cdf( - lb, mu, stdev, lb, ub - )) - cdf_diffs[3 * (i - 1) + 2] = abs(0.5 - ADRIA.truncated_normal_cdf( - mu, mu, stdev, lb, ub - )) - cdf_diffs[3 * (i - 1) + 3] = abs(1.0 - ADRIA.truncated_normal_cdf( - ub, mu, stdev, lb, ub - )) + cdf_diffs[3 * (i - 1) + 1] = abs( + 0.0 - ADRIA.truncated_normal_cdf( + lb, mu, stdev, lb, ub + ) + ) + cdf_diffs[3 * (i - 1) + 2] = abs( + 0.5 - ADRIA.truncated_normal_cdf( + mu, mu, stdev, lb, ub + ) + ) + cdf_diffs[3 * (i - 1) + 3] = abs( + 1.0 - ADRIA.truncated_normal_cdf( + ub, mu, stdev, lb, ub + ) + ) end @test all(cdf_diffs .< 1e-7) || diff --git a/test/clustering.jl b/test/clustering.jl index 3aecd9537..f82761312 100644 --- a/test/clustering.jl +++ b/test/clustering.jl @@ -1,100 +1,100 @@ - -@testset "Temporal clustering" begin - @testset "Variable series" begin - d1 = [1.0; 2.0; 3.0] - d2 = [10.0; 20.0; 30.0] - d3 = [1.0; 5.0; 8.0] - - test_data::Matrix = [d1 d2 d3] - - @testset "Compute CE (Complexity)" begin - # Compute CD for test_data - ce = ADRIA.analysis._complexity(test_data) - - # CE is a N Vector, where N is the number of rows in test_data - @test ce isa Vector - @test length(ce) == size(test_data, 2) - - # Expected results - @test ce[1] == sqrt(2.0) + 1 - @test ce[2] == sqrt(200.0) + 1 - @test ce[3] == sqrt(25.0) + 1 - end - - @testset "Compute CF (Correction Factor)" begin - # mock ce vector - ce = [2.5, 207.0, 25.0, 25.0] - - # Expected Results - @test ADRIA.analysis.correction_factor(ce[1], ce[2]) == 207.0 / 2.5 - @test ADRIA.analysis.correction_factor(ce[2], ce[3]) == 207.0 / 25.0 - @test ADRIA.analysis.correction_factor(ce[1], ce[3]) == 25.0 / 2.5 - @test ADRIA.analysis.correction_factor(ce[3], ce[4]) == 1 - end - - @testset "Comput CID Matrix (Complexity Invariance Matrix)" begin - complexity = ADRIA.analysis._complexity(test_data) - cid = ADRIA.analysis.complexity_invariance_distance(test_data) - - # CID is a Matrix (N,N) - @test size(cid, 1) == size(cid, 2) == size(test_data, 2) - - # All CID are positive - @testset "CID positivity" for i in cid - @test i >= 0 - end - - # CID ij and ji entries are the same - @test cid[1, 2] == cid[2, 1] >= 0 - @test cid[1, 3] == cid[3, 1] >= 0 - @test cid[2, 3] == cid[3, 2] >= 0 - - # CID (i,i) is null - @test cid[1, 1] == cid[2, 2] == cid[3, 3] == 0.0 - end - - @testset "Call cluster_series function" begin - # Since test_data is a 3x3 matrix, Clustering.kmeioids requires num_clusters ≤ 3 - num_clusters = 3 - clusters = ADRIA.analysis.cluster_series(test_data, num_clusters) - - @test length(clusters) == size(test_data, 2) - @test -1 ∉ clusters - @test 0 ∉ clusters - @test 1 ∈ clusters - 
end
-    end
-
-    @testset "Data with some constant series" begin
-        d1 = [1.0; 2.0; 3.0]
-        d2 = [2.0; 3.0; 4.0]
-        d3 = [6.0; 3.0; 1.0]
-        d4 = [0.0; 0.0; 0.0]
-        d5 = [1.0; 1.0; 1.0]
-
-        const_test_data::Matrix = [d1 d2 d3 d4 d5]
-
-        @testset "Compute CE (Complexity)" begin
-            # Compute CD for test_data
-            ce = ADRIA.analysis._complexity(const_test_data)
-
-            # CE is a N Vector, where N is the number of rows in test_data
-            @test ce isa Vector
-            @test length(ce) == size(const_test_data, 2)
-
-            # Expected results
-            @test ce[1] == sqrt(2.0) + 1
-            @test ce[2] == sqrt(2.0) + 1
-            @test ce[3] == sqrt(13.0) + 1
-            @test ce[4] == 1
-            @test ce[5] == 1
-        end
-
-        @testset "Call cluster_series function" begin
-            num_clusters = 5
-            clusters = ADRIA.analysis.cluster_series(const_test_data, num_clusters)
-
-            @test length(clusters) == size(const_test_data, 2)
-        end
-    end
-end
+
+@testset "Temporal clustering" begin
+    @testset "Variable series" begin
+        d1 = [1.0; 2.0; 3.0]
+        d2 = [10.0; 20.0; 30.0]
+        d3 = [1.0; 5.0; 8.0]
+
+        test_data::Matrix = [d1 d2 d3]
+
+        @testset "Compute CE (Complexity)" begin
+            # Compute CE for test_data
+            ce = ADRIA.analysis._complexity(test_data)
+
+            # CE is an N-vector, where N is the number of columns (series) in test_data
+            @test ce isa Vector
+            @test length(ce) == size(test_data, 2)
+
+            # Expected results
+            @test ce[1] == sqrt(2.0) + 1
+            @test ce[2] == sqrt(200.0) + 1
+            @test ce[3] == sqrt(25.0) + 1
+        end
+
+        @testset "Compute CF (Correction Factor)" begin
+            # mock ce vector
+            ce = [2.5, 207.0, 25.0, 25.0]
+
+            # Expected Results
+            @test ADRIA.analysis.correction_factor(ce[1], ce[2]) == 207.0 / 2.5
+            @test ADRIA.analysis.correction_factor(ce[2], ce[3]) == 207.0 / 25.0
+            @test ADRIA.analysis.correction_factor(ce[1], ce[3]) == 25.0 / 2.5
+            @test ADRIA.analysis.correction_factor(ce[3], ce[4]) == 1
+        end
+
+        @testset "Compute CID Matrix (Complexity Invariance Matrix)" begin
+            complexity = ADRIA.analysis._complexity(test_data)
+            cid = ADRIA.analysis.complexity_invariance_distance(test_data)
+
+            # CID is a Matrix (N,N)
+            @test size(cid, 1) == size(cid, 2) == size(test_data, 2)
+
+            # All CID are positive
+            @testset "CID positivity" for i in cid
+                @test i >= 0
+            end
+
+            # CID ij and ji entries are the same
+            @test cid[1, 2] == cid[2, 1] >= 0
+            @test cid[1, 3] == cid[3, 1] >= 0
+            @test cid[2, 3] == cid[3, 2] >= 0
+
+            # CID (i,i) is null
+            @test cid[1, 1] == cid[2, 2] == cid[3, 3] == 0.0
+        end
+
+        @testset "Call cluster_series function" begin
+            # Since test_data is a 3x3 matrix, Clustering.kmedoids requires num_clusters ≤ 3
+            num_clusters = 3
+            clusters = ADRIA.analysis.cluster_series(test_data, num_clusters)
+
+            @test length(clusters) == size(test_data, 2)
+            @test -1 ∉ clusters
+            @test 0 ∉ clusters
+            @test 1 ∈ clusters
+        end
+    end
+
+    @testset "Data with some constant series" begin
+        d1 = [1.0; 2.0; 3.0]
+        d2 = [2.0; 3.0; 4.0]
+        d3 = [6.0; 3.0; 1.0]
+        d4 = [0.0; 0.0; 0.0]
+        d5 = [1.0; 1.0; 1.0]
+
+        const_test_data::Matrix = [d1 d2 d3 d4 d5]
+
+        @testset "Compute CE (Complexity)" begin
+            # Compute CE for const_test_data
+            ce = ADRIA.analysis._complexity(const_test_data)
+
+            # CE is an N-vector, where N is the number of columns (series) in const_test_data
+            @test ce isa Vector
+            @test length(ce) == size(const_test_data, 2)
+
+            # Expected results
+            @test ce[1] == sqrt(2.0) + 1
+            @test ce[2] == sqrt(2.0) + 1
+            @test ce[3] == sqrt(13.0) + 1
+            @test ce[4] == 1
+            @test ce[5] == 1
+        end
+
+        @testset "Call cluster_series function" begin
+            num_clusters = 5
+            clusters = ADRIA.analysis.cluster_series(const_test_data, num_clusters)
+
+            @test 
length(clusters) == size(const_test_data, 2) + end + end +end diff --git a/test/connectivity.jl b/test/connectivity.jl index f1b4253b0..a4e43181b 100644 --- a/test/connectivity.jl +++ b/test/connectivity.jl @@ -1,7 +1,6 @@ using ADRIA, ADRIA.DataFrames, ADRIA.CSV import ADRIA.GeoDataFrames as GDF - @testset "Connectivity loading" begin site_data = GDF.read( joinpath(@__DIR__, "..", "examples", "Test_domain", "spatial", "Test_domain.gpkg") @@ -10,10 +9,17 @@ import ADRIA.GeoDataFrames as GDF unique_site_ids = site_data.reef_siteid conn_files = joinpath(@__DIR__, "..", "examples", "Test_domain", "connectivity") - conn_data = CSV.read(joinpath(conn_files, "2000", "test_conn_data.csv"), DataFrame, comment="#", drop=[1], types=Float64) + conn_data = CSV.read( + joinpath(conn_files, "2000", "test_conn_data.csv"), + DataFrame; + comment="#", + drop=[1], + types=Float64 + ) conn_details = ADRIA.site_connectivity(conn_files, unique_site_ids) conn = conn_details.conn - @test all(names(conn, 2) .== site_data.reef_siteid) || "Sites do not match expected order!" + @test all(names(conn, 2) .== site_data.reef_siteid) || + "Sites do not match expected order!" end diff --git a/test/data_loading.jl b/test/data_loading.jl index 253508491..625970480 100644 --- a/test/data_loading.jl +++ b/test/data_loading.jl @@ -2,7 +2,6 @@ using ADRIA, ADRIA.DataFrames, ADRIA.CSV using ADRIA.YAXArrays import ADRIA.GDF as GDF - if !@isdefined(ADRIA_DIR) const ADRIA_DIR = pkgdir(ADRIA) const TEST_DATA_DIR = joinpath(ADRIA_DIR, "test", "data") @@ -26,7 +25,13 @@ end unique_site_ids = site_data.reef_siteid conn_files = joinpath(TEST_DOMAIN_PATH, "connectivity") - conn_data = CSV.read(joinpath(conn_files, "example_conn.csv"), DataFrame, comment="#", drop=[1], types=Float64) + conn_data = CSV.read( + joinpath(conn_files, "example_conn.csv"), + DataFrame; + comment="#", + drop=[1], + types=Float64 + ) conn_details = ADRIA.site_connectivity(conn_files, unique_site_ids) @@ -34,7 +39,8 @@ end d1, d2 = axes(conn) @test all(d1.dim .== d2.dim) || "Site order does not match between rows/columns." @test all(d2.dim .== site_data.reef_siteid) || "Sites do not match expected order." - @test all(unique_site_ids .== conn_details.site_ids) || "Included site ids do not match length/order in geospatial file." + @test all(unique_site_ids .== conn_details.site_ids) || + "Included site ids do not match length/order in geospatial file." 
end
 
 @testset "Environmental data" begin
@@ -44,11 +50,13 @@ end
     wave_fn = joinpath(TEST_DOMAIN_PATH, "waves", "wave_RCP45.nc")
     waves = ADRIA.load_env_data(wave_fn, "Ub")
-    @test all(axes(waves, 2).dim .== site_data.reef_siteid) || "Wave data not aligned with order specified in geospatial data"
+    @test all(axes(waves, 2).dim .== site_data.reef_siteid) ||
+        "Wave data not aligned with order specified in geospatial data"
 
     dhw_fn = joinpath(TEST_DOMAIN_PATH, "DHWs", "dhwRCP45.nc")
     dhw = ADRIA.load_env_data(dhw_fn, "dhw")
-    @test all(axes(dhw, 2).dim .== site_data.reef_siteid) || "Wave data not aligned with order specified in geospatial data"
+    @test all(axes(dhw, 2).dim .== site_data.reef_siteid) ||
+        "DHW data not aligned with order specified in geospatial data"
 end
 
 @testset "Initial covers" begin
@@ -58,7 +66,8 @@ end
     coral_cover_fn = joinpath(TEST_DOMAIN_PATH, "spatial", "coral_cover.nc")
     coral_covers = ADRIA.load_initial_cover(coral_cover_fn)
 
-    @test all(axes(coral_covers, 2).dim .== site_data.reef_siteid) || "Coral cover data not aligned with order specified in geospatial data"
+    @test all(axes(coral_covers, 2).dim .== site_data.reef_siteid) ||
+        "Coral cover data not aligned with order specified in geospatial data"
 end
 
 @testset "Cyclone mortality data" begin
@@ -77,6 +86,8 @@ end
         "large_massives"
     ]
 
-    @test all(axes(cyclone_mortality, 2).dim .== site_data.reef_siteid) || "Cyclone mortality locations do not align with location order specified in geospatial data"
-    @test all(axes(cyclone_mortality, 3).dim .== expected_species_order) || "Cyclone mortality data does not list species in expected order"
+    @test all(axes(cyclone_mortality, 2).dim .== site_data.reef_siteid) ||
+        "Cyclone mortality locations do not align with location order specified in geospatial data"
+    @test all(axes(cyclone_mortality, 3).dim .== expected_species_order) ||
+        "Cyclone mortality data does not list species in expected order"
 end
diff --git a/test/growth.jl b/test/growth.jl
index 9afb27735..eebe2a2b6 100644
--- a/test/growth.jl
+++ b/test/growth.jl
@@ -28,15 +28,15 @@ end
     # check colony areas in cm^2 are within bounds designated by bin edges
     for k in 1:6
         @test all(
-            stored_colony_mean_areas[coral_params.class_id.==k] .>=
-            bin_edge_diameters_cm2[k],
+            stored_colony_mean_areas[coral_params.class_id .== k] .<=
+            bin_edge_diameters_cm2[k + 1]
         ) ||
-            "Some colony areas for size class $k are larger than the size class upper bound."
+              "Some colony areas for size class $k are larger than the size class upper bound."
        @test all(
-            stored_colony_mean_areas[coral_params.class_id.==k] .>=
-            bin_edge_diameters_cm2[k],
+            stored_colony_mean_areas[coral_params.class_id .== k] .>=
+            bin_edge_diameters_cm2[k]
        ) ||
-            "Some colony areas for size class $k are smaller than the size class lower bound."
+              "Some colony areas for size class $k are smaller than the size class lower bound."
end end @@ -79,7 +79,7 @@ end 51072.30305499843, 68331.04154366927, 91421.98332850973, - 122315.9906084096, + 122315.9906084096 ] C_cover_t = rand(Uniform(0.0, 0.01), 36, 216) @@ -299,13 +299,15 @@ end 140815.23524318123, 60269.32989888545, 51815.93369295262, - 49022.921055841725, + 49022.921055841725 ] - ADRIA.fecundity_scope!(fec_groups, fec_all, fec_params, C_cover_t, Matrix(total_site_area')) + ADRIA.fecundity_scope!( + fec_groups, fec_all, fec_params, C_cover_t, Matrix(total_site_area') + ) @test any(fec_groups .> 1e8) || - "Fecundity is measured in m² and so should be a very large number" + "Fecundity is measured in m² and so should be a very large number" @test !any(fec_groups .< 0.0) || "Negative fecundity is not allowed" end @@ -323,11 +325,11 @@ end tstep, a_adapt, n_adapt, - dhw_scen[tstep-1, :], + dhw_scen[tstep - 1, :], LPdhwcoeff, DHWmaxtot, LPDprm2, - n_groups, + n_groups ) @test all(0.0 .<= LPs .< 1.0) || "Larval Production must be between 0 and 1" end @@ -349,6 +351,6 @@ end theoretical_max = ((avail_area .* max_cover .* total_site_area)' * 51.8) for (i, rec) in enumerate(eachrow(abs_recruits)) @test all(rec' .<= theoretical_max) || - "Species group $i exceeded maximum theoretical number of settlers" + "Species group $i exceeded maximum theoretical number of settlers" end end diff --git a/test/io/inputs.jl b/test/io/inputs.jl index a7f2300e8..695da71a7 100644 --- a/test/io/inputs.jl +++ b/test/io/inputs.jl @@ -71,7 +71,7 @@ end yax_res = ZeroDataCube(; T=Int, NamedTuple{(dim_1_name,)}((dim_1_vals,))...) - @test typeof(yax_res) <: YAXArray{Int, 1} || + @test typeof(yax_res) <: YAXArray{Int,1} || "Incorrect return type. Expected a subtype of YAXArray{Int, 1} \ but received $(typeof(yax_res))" @@ -88,9 +88,11 @@ end @test dim_1_vals == collect(yax_res.axes[1]) || "Incorrect axis indices. Expected $(dim_1_vals) but received $(collect(yax_res.axes[1]))" - yax_res = ZeroDataCube(; T=Float64, NamedTuple{(dim_1_name, dim_2_name)}((dim_1_vals, dim_2_vals))...) + yax_res = ZeroDataCube(; + T=Float64, NamedTuple{(dim_1_name, dim_2_name)}((dim_1_vals, dim_2_vals))... + ) - @test typeof(yax_res) <: YAXArray{Float64, 2} || + @test typeof(yax_res) <: YAXArray{Float64,2} || "Incorrect return type. Expected a subtype of YAXArray{Float64, 2} \ but received $(typeof(yax_res))" @@ -149,10 +151,12 @@ end rand(10, 5); NamedTuple{(dim_1_name, dim_2_name)}((dim_1_vals, dim_2_vals))... ) test_DataCube( - rand(6); NamedTuple{(dim_3_name,)}((dim_3_vals, ))... + rand(6); NamedTuple{(dim_3_name,)}((dim_3_vals,))... ) test_DataCube( rand(6, 5, 10); - NamedTuple{(dim_3_name, dim_2_name, dim_1_name)}((dim_3_vals, dim_2_vals, dim_1_vals))... + NamedTuple{(dim_3_name, dim_2_name, dim_1_name)}(( + dim_3_vals, dim_2_vals, dim_1_vals + ))... 
) end diff --git a/test/mcda.jl b/test/mcda.jl index 236c625d9..4271034cf 100644 --- a/test/mcda.jl +++ b/test/mcda.jl @@ -3,7 +3,6 @@ using ADRIA.Distributions using ADRIA.decision.JMcDM using ADRIA.decision: subtypes - @testset "Validate included MCDA methods" begin """ Identifies MCDA methods that pass a simple test to inform whether they should be included diff --git a/test/metrics.jl b/test/metrics.jl index bef4d0d55..e2228d824 100644 --- a/test/metrics.jl +++ b/test/metrics.jl @@ -10,37 +10,44 @@ using ADRIA: DataCube, ZeroDataCube # Create dummy coral covers and make sum of values <= 1.0 (i.e., proportional to area) coral_cover = DataCube(rand(5, 35, 3, 1), (:timesteps, :species, :sites, :scenarios)) - coral_cover .= coral_cover ./ sum(coral_cover, dims=:species) + coral_cover .= coral_cover ./ sum(coral_cover; dims=:species) k_area = Float64[70, 60, 50] # in m² - r_sv = ADRIA.metrics.relative_shelter_volume(coral_cover, k_area, DataFrame(test_scens[1, :])) + r_sv = ADRIA.metrics.relative_shelter_volume( + coral_cover, k_area, DataFrame(test_scens[1, :]) + ) @test all(0.0 .<= r_sv .<= 1.0) @test any(r_sv .>= 0.05) # warn if all values ae very tiny values (catch Issue #91 : https://github.com/open-AIMS/ADRIA.jl/issues/91) # Test multi-scenario case coral_cover = DataCube(rand(5, 35, 3, 5), (:timesteps, :species, :sites, :scenarios)) - coral_cover .= coral_cover ./ sum(coral_cover, dims=:species) - r_sv = ADRIA.metrics.relative_shelter_volume(coral_cover, k_area, DataFrame(test_scens[1:5, :])) + coral_cover .= coral_cover ./ sum(coral_cover; dims=:species) + r_sv = ADRIA.metrics.relative_shelter_volume( + coral_cover, k_area, DataFrame(test_scens[1:5, :]) + ) - @test all(0.0 .<= r_sv .<= 1.0) || "Min CC: $(minimum(sum(coral_cover, dims=:species))); Max CC: $(maximum(sum(coral_cover, dims=:species))) | $((minimum(r_sv), maximum(r_sv)))" + @test all(0.0 .<= r_sv .<= 1.0) || + "Min CC: $(minimum(sum(coral_cover, dims=:species))); Max CC: $(maximum(sum(coral_cover, dims=:species))) | $((minimum(r_sv), maximum(r_sv)))" @test any(r_sv .>= 0.05) - # Test zero value case coral_cover = ZeroDataCube((:timesteps, :species, :sites, :scenarios), (5, 35, 3, 5)) - r_sv = ADRIA.metrics.relative_shelter_volume(coral_cover, k_area, DataFrame(test_scens[1:5, :])) + r_sv = ADRIA.metrics.relative_shelter_volume( + coral_cover, k_area, DataFrame(test_scens[1:5, :]) + ) @test all(r_sv .== 0.0) - # Maximum shelter volume case coral_cover = DataCube(rand(5, 35, 3, 5), (:timesteps, :species, :sites, :scenarios)) coral_cover[species=24, sites=1:3] .= k_area' # Coral type with maximum shelter density - r_sv = ADRIA.metrics.relative_shelter_volume(coral_cover, k_area, DataFrame(test_scens[1:5, :])) - @test all(r_sv .== 1.0) || "Scenario with complete coral cover does not achieve max RSV | $(maximum(r_sv))" + r_sv = ADRIA.metrics.relative_shelter_volume( + coral_cover, k_area, DataFrame(test_scens[1:5, :]) + ) + @test all(r_sv .== 1.0) || + "Scenario with complete coral cover does not achieve max RSV | $(maximum(r_sv))" end - # @testset "metric modifications" begin # dom = ADRIA.load_domain(joinpath(@__DIR__, "..", "examples", "Example_domain"), 45) # sa = site_area(dom) diff --git a/test/runtests.jl b/test/runtests.jl index 3dd900d6a..5c1a1927d 100644 --- a/test/runtests.jl +++ b/test/runtests.jl @@ -13,10 +13,8 @@ const TEST_DATA_DIR = joinpath(ADRIA_DIR, "test", "data") const TEST_DOMAIN_PATH = joinpath(TEST_DATA_DIR, "Test_domain") const TEST_REEFMOD_ENGINE_DOMAIN_PATH = joinpath(TEST_DATA_DIR, 
"Reefmod_test_domain") - include("aqua.jl") - """Test smaller scenario run with example scenario specification""" function test_small_spec_rs() # Load and apply configuration options @@ -107,8 +105,8 @@ function test_rs_w_fig() :N_seed_CA, :fogging, :SRM, - :a_adapt, - ], + :a_adapt + ] ) ### Scenario outcomes @@ -126,14 +124,14 @@ function test_rs_w_fig() rs, s_tac; opts=Dict(:by_RCP => false, :legend => false), - axis_opts=Dict(:title => "TAC [m²]"), + axis_opts=Dict(:title => "TAC [m²]") ) ADRIA.viz.scenarios!( tf[1, 2], rs, s_juves; opts=Dict(:summarize => false), - axis_opts=Dict(:title => "Juveniles [%]"), + axis_opts=Dict(:title => "Juveniles [%]") ) # tf # display the figure @@ -169,7 +167,7 @@ function test_rs_w_fig() pawn_fig = ADRIA.viz.pawn( tac_Si; opts, - fig_opts, + fig_opts ) # save("pawn_si.png", pawn_fig) @@ -180,7 +178,7 @@ function test_rs_w_fig() rs, tsa_s; opts, - fig_opts, + fig_opts ) # save("tsa.png", tsa_fig) @@ -215,12 +213,12 @@ function test_rs_w_fig() axis_opts = Dict{Symbol,Any}( :title => "Time Series Clustering with $n_clusters clusters", :ylabel => "TAC [m²]", - :xlabel => "Timesteps [years]", + :xlabel => "Timesteps [years]" ) tsc_fig = ADRIA.viz.clustered_scenarios( s_tac, clusters; opts=Dict{Symbol,Any}(:summarize => true), fig_opts=fig_opts, - axis_opts=axis_opts, + axis_opts=axis_opts ) # Save final figure @@ -257,7 +255,7 @@ function test_rs_w_fig() metrics::Vector{ADRIA.metrics.Metric} = [ ADRIA.metrics.scenario_total_cover, ADRIA.metrics.scenario_asv, - ADRIA.metrics.scenario_absolute_juveniles, + ADRIA.metrics.scenario_absolute_juveniles ] outcomes = ADRIA.metrics.scenario_outcomes(rs, metrics) @@ -306,7 +304,10 @@ function test_rs_w_fig() target_clusters = ADRIA.analysis.target_clusters(clusters, s_tac) # Select only desired features - fields_iv = ADRIA.component_params(rs, [Intervention, FogCriteriaWeights, SeedCriteriaWeights]).fieldname + fields_iv = + ADRIA.component_params( + rs, [Intervention, FogCriteriaWeights, SeedCriteriaWeights] + ).fieldname scenarios_iv = scens[:, fields_iv] # Use SIRUS algorithm to extract rules @@ -338,7 +339,7 @@ function test_rs_w_fig() tac_rs, foi; opts, - fig_opts, + fig_opts ) # save("rsa.png", rsa_fig) diff --git a/test/sampling.jl b/test/sampling.jl index 372896017..dc95cf3ba 100644 --- a/test/sampling.jl +++ b/test/sampling.jl @@ -14,15 +14,19 @@ end constant_params = ms.is_constant @testset "constant params are constant" begin - @test all(values(scens[1, constant_params]) .== values(scens[end, constant_params])) || - "Constant params are not constant!" + @test all( + values(scens[1, constant_params]) .== values(scens[end, constant_params]) + ) || + "Constant params are not constant!" 
end @testset "values are within expected bounds" begin lb = values(ms[:, :lower_bound]) ub = values(ms[:, :upper_bound]) - not_cw_mask = ms.component .∉ [("SeedCriteriaWeights", "FogCriteriaWeights", "DepthThresholds")] + not_cw_mask = + ms.component .∉ + [("SeedCriteriaWeights", "FogCriteriaWeights", "DepthThresholds")] not_cw_lb, not_cw_ub = lb[not_cw_mask], ub[not_cw_mask] eco = (ms.component .== "Coral") .& .!(constant_params) @@ -36,14 +40,16 @@ end if scens[i, :guided] > 0 cond = not_cw_lb .<= not_cw_scen_vals .<= not_cw_ub - @test all(cond) || "$msg | $(ms[.!(cond), :]) | $(not_cw_scen_vals[.!(cond)])" + @test all(cond) || + "$msg | $(ms[.!(cond), :]) | $(not_cw_scen_vals[.!(cond)])" continue end # When no interventions are used, e.g., for counterfactual or unguided scenarios # (guided ∈ [-1, 0]) intervention parameters are set to 0 so only check ecological values cond = lb[eco] .<= scen_vals[eco] .<= ub[eco] - @test all(cond) || "$coral_msg | $(ms[.!(cond), :]) | $(scen_vals[eco][.!(cond)])" + @test all(cond) || + "$coral_msg | $(ms[.!(cond), :]) | $(scen_vals[eco][.!(cond)])" # Note: Test to ensure all intervention factors are set to 0 is covered by the guided # sampling test below @@ -68,7 +74,7 @@ end # Ensure all interventions are deactivated (ignoring the "guided" factor) interv_params = String[ip for ip in interv_params if ip != "guided"] @test all(all.(==(0), eachcol(scens[:, interv_params]))) || - "Intervention factors with values > 0 found" + "Intervention factors with values > 0 found" end @testset "Guided sampling" begin @@ -90,15 +96,15 @@ end # Ensure at least one intervention is active @test all(any.(>(0), eachcol(scens[:, interv_params]))) || - "All intervention factors had values <= 0" + "All intervention factors had values <= 0" seed_weights = ADRIA.component_params(ms, ADRIA.SeedCriteriaWeights).fieldname fog_weights = ADRIA.component_params(ms, ADRIA.FogCriteriaWeights).fieldname @test all(abs.(sum(Matrix(scens[:, seed_weights]); dims=2) .- 1.0) .< 10e-6) || - "Some seeding weights are not properly normalized." + "Some seeding weights are not properly normalized." @test all(abs.(sum(Matrix(scens[:, fog_weights]); dims=2) .- 1.0) .< 10e-6) || - "Some fogging weights are not properly normalized." + "Some fogging weights are not properly normalized." 
end @testset "Unguided sampling" begin @@ -121,7 +127,7 @@ end # Ensure at least one intervention is active @test all(any.(>(0), eachcol(scens[:, interv_params]))) || - "All intervention factors had values <= 0" + "All intervention factors had values <= 0" end @testset "Site selection sampling" begin @@ -151,13 +157,13 @@ end # Ensure at least one intervention is active @test all(any.(>(0), eachcol(scens[:, target_params]))) || - "All target factors had values <= 0" + "All target factors had values <= 0" # Check that all coral parameters are set to their nominated default values coral_params = ADRIA.component_params(ms, ADRIA.Coral).fieldname @test all([ - all(scens[:, c] .== ms[ms.fieldname.==c, :val][1]) for c in coral_params + all(scens[:, c] .== ms[ms.fieldname .== c, :val][1]) for c in coral_params ]) || "Non-default coral parameter value found" end end @@ -168,26 +174,26 @@ end ms = ADRIA.model_spec(dom) @testset "Continuous variables" begin - continuous_factors = ms[(ms.ptype.∉[ADRIA.DISCRETE_FACTOR_TYPES]), :] + continuous_factors = ms[(ms.ptype .∉ [ADRIA.DISCRETE_FACTOR_TYPES]), :] for factor in eachrow(continuous_factors) fn = factor.fieldname @test ADRIA.get_bounds(dom, fn) == factor.dist_params[1:2] @test ADRIA.get_attr(dom, fn, :default_dist_params) == - factor.default_dist_params + factor.default_dist_params end end @testset "Discrete variables" begin - discrete_factors = ms[(ms.ptype.∈[ADRIA.DISCRETE_FACTOR_TYPES]), :] + discrete_factors = ms[(ms.ptype .∈ [ADRIA.DISCRETE_FACTOR_TYPES]), :] for factor in eachrow(discrete_factors) fn = factor.fieldname @test ADRIA.get_bounds(dom, fn)[1] == factor.dist_params[1] @test ADRIA.get_bounds(dom, fn)[2] == factor.dist_params[2] @test ADRIA.get_attr(dom, fn, :default_dist_params)[1] == - factor.default_dist_params[1] + factor.default_dist_params[1] @test ADRIA.get_attr(dom, fn, :default_dist_params)[2] == - factor.default_dist_params[2] + factor.default_dist_params[2] end end end @@ -201,8 +207,10 @@ end test_components = ["EnvironmentalLayer", "Intervention", "Coral"] - function _test_bounds(scens::DataFrame, factor_mask::BitVector, bounds_ranges::Vector) - filt_scens = Matrix(scens[scens.guided.>0, factor_mask]) + function _test_bounds( + scens::DataFrame, factor_mask::BitVector, bounds_ranges::Vector + ) + filt_scens = Matrix(scens[scens.guided .> 0, factor_mask]) min_scens, max_scens = vcat.([extrema(x) for x in eachcol(filt_scens)]...) min_bounds, max_bounds = vcat.(extrema.(collect.(bounds_ranges))...) @@ -225,10 +233,11 @@ end end @testset "set to default bounds" begin - new_bounds = ADRIA.get_attr.([dom], factor_fieldnames, [:default_dist_params]) + new_bounds = + ADRIA.get_attr.([dom], factor_fieldnames, [:default_dist_params]) dom = set_factor_bounds(dom; NamedTuple{factor_fieldnames}(new_bounds)...) - factor_params = dom.model[ms.fieldname.∈[factor_fieldnames]][1] + factor_params = dom.model[ms.fieldname .∈ [factor_fieldnames]][1] @test all(factor_params.dist_params .== factor_params.default_dist_params) scens = ADRIA.sample(dom, num_samples) @@ -251,10 +260,11 @@ end end @testset "get_default_dist_params" begin - new_bounds = ADRIA.get_attr.([dom], factor_fieldnames, [:default_dist_params]) + new_bounds = + ADRIA.get_attr.([dom], factor_fieldnames, [:default_dist_params]) dom = set_factor_bounds(dom; NamedTuple{factor_fieldnames}(new_bounds)...) 
- factor_params = dom.model[ms.fieldname.∈[factor_fieldnames]][1] + factor_params = dom.model[ms.fieldname .∈ [factor_fieldnames]][1] @test all(factor_params.dist_params .== factor_params.default_dist_params) scens = ADRIA.sample(dom, num_samples) @@ -264,7 +274,9 @@ end end @testset "DiscreteOrderedUniformDist distributions" begin - factor_mask = ms.component .∈ [test_components] .&& ms.dist .== ADRIA.DiscreteOrderedUniformDist + factor_mask = + ms.component .∈ [test_components] .&& + ms.dist .== ADRIA.DiscreteOrderedUniformDist factors = ms[factor_mask, :] factor_fieldnames = (factors.fieldname...,) @testset "set_factor_bounds" begin @@ -272,17 +284,20 @@ end new_bounds = Tuple.(sort.(rand.(bounds_ranges, 2))) new_steps = [ceil((nb[2] - nb[1]) / 10) for nb in new_bounds] new_dist_params = [(b[1], b[2], s) for (b, s) in zip(new_bounds, new_steps)] - dom = set_factor_bounds(dom; NamedTuple{factor_fieldnames}(new_dist_params)...) + dom = set_factor_bounds( + dom; NamedTuple{factor_fieldnames}(new_dist_params)... + ) scens = ADRIA.sample_guided(dom, num_samples) _test_bounds(scens, factor_mask, bounds_ranges) end @testset "get_default_dist_params" begin - new_bounds = ADRIA.get_attr.([dom], factor_fieldnames, [:default_dist_params]) + new_bounds = + ADRIA.get_attr.([dom], factor_fieldnames, [:default_dist_params]) dom = set_factor_bounds(dom; NamedTuple{factor_fieldnames}(new_bounds)...) - factor_params = dom.model[ms.fieldname.∈[factor_fieldnames]][1] + factor_params = dom.model[ms.fieldname .∈ [factor_fieldnames]][1] @test all(factor_params.dist_params .== factor_params.default_dist_params) scens = ADRIA.sample(dom, num_samples) @@ -300,17 +315,20 @@ end mode_ranges = [range(nb[1], nb[2], 5) for nb in new_bounds] new_modes = (rand.(mode_ranges)) new_dist_params = [(b[1], b[2], p) for (b, p) in zip(new_bounds, new_modes)] - dom = set_factor_bounds(dom; NamedTuple{factor_fieldnames}(new_dist_params)...) + dom = set_factor_bounds( + dom; NamedTuple{factor_fieldnames}(new_dist_params)... + ) scens = ADRIA.sample_guided(dom, num_samples) _test_bounds(scens, factor_mask, new_bounds) end @testset "get_default_dist_params" begin - new_bounds = ADRIA.get_attr.([dom], factor_fieldnames, [:default_dist_params]) + new_bounds = + ADRIA.get_attr.([dom], factor_fieldnames, [:default_dist_params]) dom = set_factor_bounds(dom; NamedTuple{factor_fieldnames}(new_bounds)...) - factor_params = dom.model[ms.fieldname.∈[factor_fieldnames]][1] + factor_params = dom.model[ms.fieldname .∈ [factor_fieldnames]][1] @test all(factor_params.dist_params .== factor_params.default_dist_params) scens = ADRIA.sample(dom, num_samples) @@ -319,7 +337,9 @@ end end @testset "DiscreteTriangularDist distributions" begin - factor_mask = ms.component .∈ [test_components] .&& ms.dist .== ADRIA.DiscreteTriangularDist + factor_mask = + ms.component .∈ [test_components] .&& + ms.dist .== ADRIA.DiscreteTriangularDist factors = ms[factor_mask, :] factor_fieldnames = (factors.fieldname...,) @testset "set_factor_bounds" begin @@ -328,17 +348,20 @@ end new_mode_ranges = [nb[1]:nb[2] for nb in new_bounds] new_peaks = (rand.(new_mode_ranges)) new_dist_params = [(b[1], b[2], p) for (b, p) in zip(new_bounds, new_peaks)] - dom = set_factor_bounds(dom; NamedTuple{factor_fieldnames}(new_dist_params)...) + dom = set_factor_bounds( + dom; NamedTuple{factor_fieldnames}(new_dist_params)... 
+            )
 
             scens = ADRIA.sample_guided(dom, num_samples)
             _test_bounds(scens, factor_mask, new_bounds)
         end
 
         @testset "get_default_dist_params" begin
-            new_bounds = ADRIA.get_attr.([dom], factor_fieldnames, [:default_dist_params])
+            new_bounds =
+                ADRIA.get_attr.([dom], factor_fieldnames, [:default_dist_params])
             dom = set_factor_bounds(dom; NamedTuple{factor_fieldnames}(new_bounds)...)
 
-            factor_params = dom.model[ms.fieldname.∈[factor_fieldnames]][1]
+            factor_params = dom.model[ms.fieldname .∈ [factor_fieldnames]][1]
             @test all(factor_params.dist_params .== factor_params.default_dist_params)
 
             scens = ADRIA.sample(dom, num_samples)
diff --git a/test/seeding.jl b/test/seeding.jl
index 271017b14..d30da5015 100644
--- a/test/seeding.jl
+++ b/test/seeding.jl
@@ -39,7 +39,7 @@ end
     total_area_seed = seed_dist .* total_site_area[seed_locs]'
 
     # total area of seeded corals
-    total_area_coral_out = sum(total_area_seed, dims=2)
+    total_area_coral_out = sum(total_area_seed; dims=2)
 
     # absolute available area to seed for selected sites
     selected_avail_space = available_space[seed_locs]
@@ -68,12 +68,17 @@ end
     seed_SM = seeded_area[taxa=At("N_seed_SM")][1]
 
     approx_zero(x) = abs(x) + one(1.0) ≈ one(1.0)
-    @test approx_zero(seed_TA - area_TA) && approx_zero(seed_CA - area_CA) && approx_zero(seed_CA - area_CA) || "Area of corals seeded not equal to (colony area) * (number or corals)"
+    @test approx_zero(seed_TA - area_TA) && approx_zero(seed_CA - area_CA) &&
+          approx_zero(seed_CA - area_CA) ||
+        "Area of corals seeded not equal to (colony area) * (number of corals)"
 
     @test all(seed_dist .< 1.0) || "Some proportions of seeded corals greater than 1"
     @test all(seed_dist .>= 0.0) || "Some proportions of seeded corals less than zero"
-    @test all(total_area_seed .< selected_avail_space') || "Area seeded greater than available area"
-    @test (max_ind_out == max_ind) || "Maximum distributed proportion of seeded coral not seeded in largest available area."
-    @test (min_ind_out == min_ind) || "Minimum distributed proportion of seeded coral not seeded in smallest available area."
+    @test all(total_area_seed .< selected_avail_space') ||
+        "Area seeded greater than available area"
+    @test (max_ind_out == max_ind) ||
+        "Maximum distributed proportion of seeded coral not seeded in largest available area."
+    @test (min_ind_out == min_ind) ||
+        "Minimum distributed proportion of seeded coral not seeded in smallest available area."
end @testset "DHW distribution priors" begin @@ -94,7 +99,8 @@ end orig_dist = copy(c_dist_t) dist_std = rand(36) - seed_corals!(C_cover_t, total_location_area, leftover_space_m², seed_locs, seeded_area, seed_sc, + seed_corals!(C_cover_t, total_location_area, leftover_space_m², seed_locs, + seeded_area, seed_sc, a_adapt, @view(Yseed[1, :, :]), dist_std, c_dist_t) # Ensure correct priors/weightings for each location @@ -102,7 +108,8 @@ end for (i, sc) in enumerate(findall(seed_sc)) prior1 = Yseed[1, i, loc] ./ C_cover_t[sc, loc] expected = [prior1, 1.0 - prior1] - @test c_dist_t[sc, loc] > orig_dist[sc, loc] || "Expected mean of distribution to shift | SC: $sc ; Location: $loc" + @test c_dist_t[sc, loc] > orig_dist[sc, loc] || + "Expected mean of distribution to shift | SC: $sc ; Location: $loc" end end end diff --git a/test/site_selection.jl b/test/site_selection.jl index 198fb1299..622082ef0 100644 --- a/test/site_selection.jl +++ b/test/site_selection.jl @@ -1,7 +1,6 @@ using Test using ADRIA.Distributions - if !@isdefined(ADRIA_DIR) const ADRIA_DIR = pkgdir(ADRIA) const TEST_DOMAIN_PATH = joinpath(ADRIA_DIR, "test", "data", "Test_domain") @@ -34,7 +33,7 @@ end fog_years, 5, max_cover, - depth_priority, + depth_priority ) # Check that only two sites are selected (the sites where k > 0.0) @@ -52,15 +51,17 @@ end area_to_seed = 962.11 # Area of seeded corals in m^2. - sum_cover = repeat(sum(dom.init_coral_cover, dims=1), size(scens, 1)) + sum_cover = repeat(sum(dom.init_coral_cover; dims=1), size(scens, 1)) ranks = ADRIA.decision.rank_locations(dom, scens, sum_cover, area_to_seed) - @test length(ranks.scenarios) == sum(scens.guided .> 0) || "Specified number of scenarios was not carried out." - @test length(ranks.sites) == length(dom.site_ids) || "Ranks storage is not correct size for this domain." + @test length(ranks.scenarios) == sum(scens.guided .> 0) || + "Specified number of scenarios was not carried out." + @test length(ranks.sites) == length(dom.site_ids) || + "Ranks storage is not correct size for this domain." sel_sites = unique(ranks) - sel_sites = sel_sites[sel_sites.!=0.0] - possible_ranks = collect(Float64, 1:ADRIA.n_locations(dom)+1.0) + sel_sites = sel_sites[sel_sites .!= 0.0] + possible_ranks = collect(Float64, 1:(ADRIA.n_locations(dom) + 1.0)) @test all([in(ss, possible_ranks) for ss in sel_sites]) || "Impossible rank assigned." end @@ -83,6 +84,8 @@ end S, weights, rankings, n_site_int, mcda_func, 2 ) - @test all([(rankings[rankings[:, 1].==s_order[rank, 1], 2].==rank)[1] for rank in 1:size(s_order, 1)]) || "Ranking does not match mcda score ordering" - + @test all([ + (rankings[rankings[:, 1] .== s_order[rank, 1], 2] .== rank)[1] for + rank in 1:size(s_order, 1) + ]) || "Ranking does not match mcda score ordering" end diff --git a/test/spatial_clustering.jl b/test/spatial_clustering.jl index eafeba288..72d4814a7 100644 --- a/test/spatial_clustering.jl +++ b/test/spatial_clustering.jl @@ -16,7 +16,7 @@ using Statistics prefsites = site_order[1:5] reef_locs = [fill("1", 10)..., fill("2", 10)..., fill("3", 10)...] 
- s_order = Union{Float64, Int64}[Int64.(site_order) rand(n_sites)] + s_order = Union{Float64,Int64}[Int64.(site_order) rand(n_sites)] # All selected sites are in the same reef, so 2 should be replaced reef_locs[prefsites] .= "2" # Empty ranking just for testing @@ -44,7 +44,7 @@ using Statistics # Set no 3 sites to be in the same reef and check none are replaced reef_locs[prefsites] .= ["1", "2", "3", "4", "1"] - s_order = Union{Float64, Int64}[Int64.(orig_site_order) rand(n_sites)] + s_order = Union{Float64,Int64}[Int64.(orig_site_order) rand(n_sites)] rankings = Int64[site_ids zeros(Int64, n_sites) zeros(Int64, n_sites)] new_prefsites, rankings = constrain_reef_cluster( @@ -54,7 +54,7 @@ using Statistics area_to_seed, available_space, n_iv_locs, - 3, + 3 ) @test all(new_prefsites .== prefsites) || @@ -64,7 +64,7 @@ using Statistics available_space[prefsites] .= (area_to_seed - 100.0) / n_iv_locs available_space[s_order[n_iv_locs + 1, 1]] = area_to_seed - s_order = Union{Float64, Int64}[Int64.(orig_site_order) rand(n_sites)] + s_order = Union{Float64,Int64}[Int64.(orig_site_order) rand(n_sites)] rankings = Int64[site_ids zeros(Int64, n_sites) zeros(Int64, n_sites)] new_prefsites, rankings = constrain_reef_cluster( diff --git a/test/spec.jl b/test/spec.jl index 060df84f3..aba5d588d 100644 --- a/test/spec.jl +++ b/test/spec.jl @@ -1,26 +1,26 @@ -if !@isdefined(ADRIA_DIR) - const ADRIA_DIR = pkgdir(ADRIA) - const TEST_DOMAIN_PATH = joinpath(ADRIA_DIR, "test", "data", "Test_domain") -end - -@testset "Extracting component parameters" begin - # Run full example to make sure nothing errors - ADRIA.setup() # Load and apply configuration options - - # Use a temporary directory for result location - ENV["ADRIA_OUTPUT_DIR"] = mktempdir() - - dom = ADRIA.load_domain(TEST_DOMAIN_PATH, "45") - - x = ADRIA.component_params(dom.model, Intervention) - @test size(x, 1) > 0 - - x = ADRIA.component_params(dom.model, ADRIA.Coral) - @test size(x, 1) > 0 - - x = ADRIA.component_params(dom.model, ADRIA.SeedCriteriaWeights) - @test size(x, 1) > 0 - - x = ADRIA.component_params(dom.model, ADRIA.FogCriteriaWeights) - @test size(x, 1) > 0 -end +if !@isdefined(ADRIA_DIR) + const ADRIA_DIR = pkgdir(ADRIA) + const TEST_DOMAIN_PATH = joinpath(ADRIA_DIR, "test", "data", "Test_domain") +end + +@testset "Extracting component parameters" begin + # Run full example to make sure nothing errors + ADRIA.setup() # Load and apply configuration options + + # Use a temporary directory for result location + ENV["ADRIA_OUTPUT_DIR"] = mktempdir() + + dom = ADRIA.load_domain(TEST_DOMAIN_PATH, "45") + + x = ADRIA.component_params(dom.model, Intervention) + @test size(x, 1) > 0 + + x = ADRIA.component_params(dom.model, ADRIA.Coral) + @test size(x, 1) > 0 + + x = ADRIA.component_params(dom.model, ADRIA.SeedCriteriaWeights) + @test size(x, 1) > 0 + + x = ADRIA.component_params(dom.model, ADRIA.FogCriteriaWeights) + @test size(x, 1) > 0 +end diff --git a/test/utils/scale.jl b/test/utils/scale.jl index 1b9d81e3e..d928111f4 100644 --- a/test/utils/scale.jl +++ b/test/utils/scale.jl @@ -6,16 +6,16 @@ @test linear_scale(0.1, :m, :cm) == round(10^1) # 0.1m = 10cm @test linear_scale(:m, :cm) == round(10^2) - @test linear_scale(:cm, :m) == round(10^-2, digits=2) + @test linear_scale(:cm, :m) == round(10^-2; digits=2) end @testset "quadratic_scale" begin quadratic_scale = ADRIA.quadratic_scale - @test quadratic_scale(100, :cm, :m) == round(10^-2, digits=2) # 100cm^2 = 1m + @test quadratic_scale(100, :cm, :m) == round(10^-2; digits=2) # 100cm^2 = 
0.01m²
         @test quadratic_scale(0.1, :m, :cm) == round(10^3) # 0.1m² = 1000cm²
         @test quadratic_scale(:m, :cm) == round(10^4)
-        @test quadratic_scale(:cm, :m) == round(10^-4, digits=4)
+        @test quadratic_scale(:cm, :m) == round(10^-4; digits=4)
     end
 end
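
Note: every hunk in this patch applies the same mechanical style rules, consistent with a JuliaFormatter-style pass: a `;` separating positional from keyword arguments at call sites, spaces around broadcast comparison operators, parenthesised arithmetic inside ranges and indices, and long calls wrapped one argument per line with no trailing comma. A minimal runnable sketch of the convention follows; it assumes only base Julia plus Statistics, and `site_mean` and the sample values are illustrative names, not taken from the codebase:

    using Statistics

    x = rand(5, 3)

    # Old style, as removed throughout this patch:
    #   site_mean(x) = dropdims(mean(x, dims=2), dims=2)
    #   hot = x[x.>0.5]
    #   y = x[:, 1:end-1]

    # New style, as introduced throughout this patch:
    site_mean(x) = dropdims(mean(x; dims=2); dims=2)  # `;` before keyword arguments
    hot = x[x .> 0.5]                                 # spaced broadcast comparison
    y = x[:, 1:(end - 1)]                             # parenthesised range arithmetic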