
Commit 6e818d2

Replace @error() with error(). The former is used for logging; the latter throws an exception.
1 parent 25a94fb commit 6e818d2
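
For context on the change itself: @error is a logging macro that records a message and lets execution continue, while error() throws an ErrorException and aborts the current call. A minimal standalone sketch of the difference (hypothetical function names, not code from this repository):

function guarded_log(method::String)
    # @error is provided by Julia's logging system; it logs and returns nothing,
    # so execution simply falls through to the next line.
    method == "append" || @error("method - " * method * " - does not exist")
    return method   # still reached for an unknown method
end

function guarded_throw(method::String)
    # error() throws an ErrorException, so the function aborts here on failure.
    method == "append" || error("method - " * method * " - does not exist")
    return method   # only reached for a valid method
end

guarded_log("foo")    # prints an error log entry and still returns "foo"
guarded_throw("foo")  # throws ErrorException; the caller never gets a result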

5 files changed: 19 additions & 19 deletions

src/clustering/extreme_vals.jl

Lines changed: 6 additions & 6 deletions
@@ -20,7 +20,7 @@ function simple_extr_val_sel(data::ClustData,
   elseif rep_mod_method=="append"
     data_mod = input_data_modification(data,idcs)
   else
-    @error("rep_mod_method - "*rep_mod_method*" - does not exist")
+    error("rep_mod_method - "*rep_mod_method*" - does not exist")
   end
   return data_mod,extr_vals,idcs
 end
@@ -104,15 +104,15 @@ function simple_extr_val_ident(clust_data::ClustData,
       data_eval[1,k] = sum(data[:,k:(k+delta_period)])
     end
   else
-    @error("peak_def - "*peak_def*" and consecutive_periods $consecutive_periods - not defined")
+    error("peak_def - "*peak_def*" and consecutive_periods $consecutive_periods - not defined")
   end
   # find minimum or maximum index. Second argument returns cartesian indices, second argument of that is the column (period) index
   if extremum=="max"
     idx_k = findmax(data_eval)[2][2]
   elseif extremum=="min"
     idx_k = findmin(data_eval)[2][2]
   else
-    @error("extremum - "*extremum*" - not defined")
+    error("extremum - "*extremum*" - not defined")
   end
   idx=collect(idx_k:(idx_k+delta_period))
   return idx
@@ -143,8 +143,8 @@ function input_data_modification(data::ClustData,
   k_ids_dn=deepcopy(data.k_ids)
   #check for uniqueness and right sorting (however just those one representing)
   k_ids_check=k_ids_dn[findall(k_ids_dn.!=0)]
-  allunique(k_ids_check) || @error "the provided clust_data.k_ids are not unique - The clust_data is probably the result of a clustering already."
-  sort(k_ids_check)==k_ids_check || @error "the provided clust_data.k_ids are not monoton increasing - The clust_data is probably the result of a clustering already."
+  allunique(k_ids_check) || error("the provided clust_data.k_ids are not unique - The clust_data is probably the result of a clustering already.")
+  sort(k_ids_check)==k_ids_check || error("the provided clust_data.k_ids are not monoton increasing - The clust_data is probably the result of a clustering already.")
   #get all k-ids that are represented within this clust-data
   k_ids_dn_data=k_ids_dn[findall(data.k_ids.!=0)]
   for k in sort(extr_val_idcs)
@@ -203,7 +203,7 @@ function extreme_val_output(data::ClustData,
     # assign it to the full original time-series
     k_ids_ed[index_k_ids_data]=k_ids_ed_data
   else
-    @error("rep_mod_method - "*rep_mod_method*" - does not exist")
+    error("rep_mod_method - "*rep_mod_method*" - does not exist")
   end
   delta_t_ed=data.delta_t[:,unique_extr_val_idcs]
   extr_vals = ClustData(data.region,data.years,K_ed,data.T,data_ed,weights_ed,k_ids_ed;delta_t=delta_t_ed,mean=data.mean,sdv=data.sdv)
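
The condition || error(message) lines above rely on short-circuit evaluation: the error call only runs when the check fails. A standalone sketch of the k_ids checks with a made-up vector (names and values are illustrative only, not data from the package):

k_ids = [1, 0, 2, 0, 3]                    # 0 marks periods represented by another period
k_ids_check = k_ids[findall(k_ids .!= 0)]  # [1, 2, 3]

# Each guard throws only if its check fails; here both pass silently.
allunique(k_ids_check) || error("k_ids are not unique")
sort(k_ids_check) == k_ids_check || error("k_ids are not monotonically increasing")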

src/clustering/run_clust.jl

Lines changed: 1 addition & 1 deletion
@@ -375,7 +375,7 @@ function run_clust_kmedoids_exact_medoid(
   kmexact_optimizer=0
   )

-  (typeof(kmexact_optimizer)==Int) && @error("Please provide a kmexact_optimizer (Gurobi Environment). See test file for example")
+  (typeof(kmexact_optimizer)==Int) && error("Please provide a kmexact_optimizer (Gurobi Environment). See test file for example")

   # TODO: optional in future: pass distance metric as kwargs
   dist = SqEuclidean()
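
Here the complementary condition && error(message) form is used: the default kmexact_optimizer=0 acts as an Int sentinel, so the check fires whenever the caller did not supply an optimizer. A rough sketch of the pattern (hypothetical function, not the package API):

function require_optimizer(; optimizer=0)
    # 0 (an Int) means "nothing was passed"; any real optimizer object is not an Int.
    (typeof(optimizer) == Int) && error("Please provide an optimizer")
    return optimizer
end

require_optimizer()                  # throws ErrorException
require_optimizer(optimizer="stub")  # returns "stub" (any non-Int placeholder works for the sketch)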

src/utils/datastructs.jl

Lines changed: 2 additions & 2 deletions
@@ -190,7 +190,7 @@ function ClustData(region::String,
   mean::Dict{String,Array}=Dict{String,Array}(),
   sdv::Dict{String,Array}=Dict{String,Array}()
   )
-  isempty(data) && @error("Need to provide at least one input data stream")
+  isempty(data) && error("Need to provide at least one input data stream")
   mean_sdv_provided = ( !isempty(mean) && !isempty(sdv))
   if !mean_sdv_provided
     for (k,v) in data
@@ -282,7 +282,7 @@ function ClustDataMerged(data::ClustData)
     push!(data_type,k)
   end
   if maximum(data.delta_t)!=1
-    throw(@error "You cannot recluster data with different Δt")
+    error("You cannot recluster data with different Δt")
   end
   ClustDataMerged(data.region,data.years,data.K,data.T,data_merged,data_type,data.weights,data.mean,data.sdv,data.delta_t,data.k_ids)
 end
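
The removed throw(@error "...") line mixed the two mechanisms: @error logs and returns nothing, so the subsequent throw does not carry the message. error(msg) is the direct equivalent of throw(ErrorException(msg)); a small sketch of what a caller can then catch:

try
    error("You cannot recluster data with different Δt")
catch e
    println(typeof(e))  # ErrorException
    println(e.msg)      # "You cannot recluster data with different Δt"
end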

src/utils/load_data.jl

Lines changed: 6 additions & 6 deletions
@@ -49,7 +49,7 @@ function load_timeseries_data(data_path::String;
     data_name=split(full_data_name,".")[1]
     K=add_timeseries_data!(dt, data_name, dirname(data_path); K=K, T=T, years=years)
   else
-    throw(@error("The path $data_path is neither recognized as a directory nor as a file"))
+    error("The path $data_path is neither recognized as a directory nor as a file")
   end
   # Store the data
   ts_input_data = ClustData(FullInputData(region, years, num, dt),K,T)
@@ -94,7 +94,7 @@ function load_timeseries_data(existing_data::Symbol;
   elseif existing_data == :CEP_GER18
     data_path=joinpath(data_path,"TS_GER_18")
   else
-    @error("The symbol - $existing_data - does not exist")
+    error("The symbol - $existing_data - does not exist")
   end
   return load_timeseries_data(data_path;region=region,T=T,years=years,att=att)
 end
@@ -135,7 +135,7 @@ function add_timeseries_data!(dt::Dict{String,Array},
     if !(column[1] in [time_name, year_name])
       K_calc=Int(floor(length(column[2])/T))
       if K_calc!=K && K!=0
-        @error("The time_series $(column[1]) has K=$K_calc != K=$K of the previous")
+        error("The time_series $(column[1]) has K=$K_calc != K=$K of the previous")
      else
        K=K_calc
      end
@@ -158,7 +158,7 @@ function find_column_name(df::DataFrame, name_itr::Array{Symbol,1}; error::Bool=
     end
   end
   if error
-    col_name!=:none || throw(@error "No $(name_itr) in $(repr(df)).")
+    col_name!=:none || error("No $(name_itr) in $(repr(df)).")
   else
     col_name!=:none || @warn "No $(name_itr) in $(repr(df))."
   end
@@ -174,8 +174,8 @@ If the number of periods of the `ts_weather` data isn't a multiple of the `ts`-t
 """
 function combine_timeseries_weather_data(ts::ClustData,
                                          ts_weather::ClustData)
-  ts.T==ts_weather.T || throw(@error "The number of timesteps per period is not the same: `ts.T=$(ts.T)$(ts_weather.T)=ts_weather.T`")
-  ts.K<=ts_weather.K || throw(@error "The number of timesteps in the `ts`-timeseries isn't shorter or equal to the ones in the `ts_weather`-timeseries.")
+  ts.T==ts_weather.T || error("The number of timesteps per period is not the same: `ts.T=$(ts.T)$(ts_weather.T)=ts_weather.T`")
+  ts.K<=ts_weather.K || error("The number of timesteps in the `ts`-timeseries isn't shorter or equal to the ones in the `ts_weather`-timeseries.")
   ts_weather.K%ts.K==0 || @warn "The number of periods of the `ts_weather` data isn't a multiple of the other `ts`-timeseries: periods 1 to $(ts_weather.K%ts.K) are attached to the end of the new combined timeseries."
   ts_data=deepcopy(ts_weather.data)
   ts_mean=deepcopy(ts_weather.mean)
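
One subtlety in the find_column_name hunk: the function takes a keyword argument named error::Bool, and in Julia a local binding with that name shadows Base.error inside the function body. A standalone sketch of this scoping behavior (hypothetical function, not repository code), where the throwing call is qualified as Base.error to sidestep the shadowing:

function check_column(found::Bool; error::Bool=true)
    if error                                        # the Bool keyword argument
        found || Base.error("No matching column.")  # qualified: plain `error` refers to the Bool here
    else
        found || @warn "No matching column."
    end
    return found
end

check_column(true)                # true
check_column(false; error=false)  # logs a warning and returns false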

src/utils/utils.jl

Lines changed: 4 additions & 4 deletions
@@ -92,7 +92,7 @@ function z_normalize(data::Array;
     end
     return data_norm, hourly_mean, hourly_sdv #TODO change the output here to an immutable struct with three fields - use struct - "composite type"
   else
-    @error("scope _ ",scope," _ not defined.")
+    error("scope _ ",scope," _ not defined.")
   end
 end # function z_normalize

@@ -102,7 +102,7 @@ provide idx should usually be done as default within function call in order to e
 """
 function undo_z_normalize(data_norm_merged::Array,mn::Dict{String,Array},sdv::Dict{String,Array};idx=[])
   T = div(size(data_norm_merged)[1],length(keys(mn))) # number of time steps in one period. div() is integer division like in c++, yields integer (instead of float as in normal division)
-  0 != rem(size(data_norm_merged)[1],length(keys(mn))) && @error("dimension mismatch") # rem() checks the remainder. If not zero, throw error.
+  0 != rem(size(data_norm_merged)[1],length(keys(mn))) && error("dimension mismatch") # rem() checks the remainder. If not zero, throw error.
   data_merged = zeros(size(data_norm_merged))
   i=0
   for (attr,mn_a) in mn
@@ -137,7 +137,7 @@ function undo_z_normalize(data_norm::Array, mn::Array, sdv::Array; idx=[])
     data = data_norm * Diagonal(summed_sdv) + ones(size(data_norm,1)) * summed_mean'
     return data
   elseif isempty(idx)
-    @error("no idx provided in undo_z_normalize")
+    error("no idx provided in undo_z_normalize")
   end
 end

@@ -269,7 +269,7 @@ This is the DEFAULT resize medoids function
 Takes in centers (typically medoids) and normalizes them such that the yearly average of the clustered data is the same as the yearly average of the original data.
 """
 function resize_medoids(data::ClustData,centers::Array,weights::Array)
-  (data.T * length(keys(data.data)) != size(centers,1) ) && @error("dimension missmatch between full input data and centers")
+  (data.T * length(keys(data.data)) != size(centers,1) ) && error("dimension missmatch between full input data and centers")
   centers_res = zeros(size(centers))
   # go through the attributes within data
   i=0
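
The z_normalize hunk keeps the multi-argument call style: error accepts several arguments and joins them with string(), so the pieces become one message. A short sketch (the scope value is illustrative only):

scope = "weekly"   # hypothetical value
try
    error("scope _ ", scope, " _ not defined.")
catch e
    println(e.msg)  # scope _ weekly _ not defined.
end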
