Merge pull request #390 from leondavi/nerlplanner
[NERLPLANNER] Fix issues regarding lossArgs changes
leondavi authored Aug 10, 2024
2 parents 696ba6c + 11b6416 commit b71bff9
Showing 10 changed files with 29 additions and 11 deletions.
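Each worker definition in the JSON files gains the same pair of lines: a lossArgs entry (stored under the key "lossArgs:", as spelled in these files) plus its _doc_lossArgs documentation. The two nerlPlanner Python files are adjusted so the new field is assembled and passed through correctly. As a rough sketch of reading the field back, assuming the worker JSON keeps its fields at the top level (the file name and printed value are illustrative, not taken from this commit):

    import json

    # Load one of the worker definitions touched by this PR.
    with open("inputJsonsFiles/Workers/worker_latest.json") as f:
        worker = json.load(f)

    # The key is spelled "lossArgs:" (trailing colon included) in these JSON files.
    loss_args = worker.get("lossArgs:", "")
    print(loss_args or "<empty: no regularization arguments>")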
@@ -91,6 +91,8 @@
"_doc_layer_functions_scaler": " none:1 | MinMax:2 | MeanStd:3 | STD:4 | Log:5 |",
"lossMethod": "2",
"_doc_lossMethod": " SSE:1 | MSE:2 | NSE:3 | MinkowskiE:4 | WSE:5 | CEE:6 |",
"lossArgs:": "",
"_doc_lossArgs": "Arguments to loss function. Regularization: reg=L2, reg=L1, reg=NoRegularization (can be also empty)",
"lr": "0.01",
"_doc_lr": "Positve float",
"epochs": "1",
@@ -124,6 +126,8 @@
"_doc_layer_functions_scaler": " none:1 | MinMax:2 | MeanStd:3 | STD:4 | Log:5 |",
"lossMethod": "2",
"_doc_lossMethod": " SSE:1 | MSE:2 | NSE:3 | MinkowskiE:4 | WSE:5 | CEE:6 |",
"lossArgs:": "",
"_doc_lossArgs": "Arguments to loss function. Regularization: reg=L2, reg=L1, reg=NoRegularization (can be also empty)",
"lr": "0.01",
"_doc_lr": "Positve float",
"epochs": "1",
@@ -142,4 +146,4 @@
"_doc_distributedSystemToken": "Token that associates distributed group of workers and parameter-server"
}
}
-}
+}
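According to the _doc_lossArgs string added above, lossArgs carries the loss function's arguments and currently documents three regularization settings (reg=L1, reg=L2, reg=NoRegularization) or an empty string. A small validation sketch against that documented set; the helper name and regex are assumptions for illustration, not code from this repository:

    import re

    # Documented options per _doc_lossArgs: empty, or reg=L1 | reg=L2 | reg=NoRegularization.
    LOSS_ARGS_PATTERN = re.compile(r"^$|^reg=(L1|L2|NoRegularization)$")

    def loss_args_is_valid(loss_args: str) -> bool:
        """Hypothetical check mirroring the documented lossArgs options."""
        return bool(LOSS_ARGS_PATTERN.match(loss_args.strip()))

    assert loss_args_is_valid("")              # empty is explicitly allowed
    assert loss_args_is_valid("reg=L2")
    assert not loss_args_is_valid("reg=L3")    # not among the documented values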
2 changes: 2 additions & 0 deletions inputJsonsFiles/Workers/distributed_worker.json
@@ -12,6 +12,8 @@
"_doc_layer_functions_scaler": " none:1 | MinMax:2 | MeanStd:3 | STD:4 | Log:5 |",
"lossMethod": "2",
"_doc_lossMethod": " SSE:1 | MSE:2 | NSE:3 | MinkowskiE:4 | WSE:5 | CEE:6 |",
"lossArgs:": "",
"_doc_lossArgs": "Arguments to loss function. Regularization: reg=L2, reg=L1, reg=NoRegularization (can be also empty)",
"lr": "0.001",
"_doc_lr": "Positve float",
"epochs": "1",
4 changes: 3 additions & 1 deletion inputJsonsFiles/Workers/worker_ae_classifier.json
@@ -14,6 +14,8 @@
"_doc_layer_functions_scaler": " none:1 | MinMax:2 | MeanStd:3 | STD:4 | Log:5 |",
"lossMethod": "2",
"_doc_lossMethod": " SSE:1 | MSE:2 | NSE:3 | MinkowskiE:4 | WSE:5 | CEE:6 |",
"lossArgs:": "",
"_doc_lossArgs": "Arguments to loss function. Regularization: reg=L2, reg=L1, reg=NoRegularization (can be also empty)",
"lr": "0.01",
"_doc_lr": "Positve float",
"epochs": "1",
@@ -30,4 +32,4 @@
"_doc_distributedSystemArgs": "String",
"distributedSystemToken": "none",
"_doc_distributedSystemToken": "Token that associates distributed group of workers and parameter-server"
-}
+}
4 changes: 3 additions & 1 deletion inputJsonsFiles/Workers/worker_fed_client.json
@@ -14,6 +14,8 @@
"_doc_layer_functions_scaler": " none:1 | MinMax:2 | MeanStd:3 | STD:4 | Log:5 |",
"lossMethod": "2",
"_doc_lossMethod": " SSE:1 | MSE:2 | NSE:3 | MinkowskiE:4 | WSE:5 | CEE:6 |",
"lossArgs:": "",
"_doc_lossArgs": "Arguments to loss function. Regularization: reg=L2, reg=L1, reg=NoRegularization (can be also empty)",
"lr": "0.01",
"_doc_lr": "Positve float",
"epochs": "1",
@@ -30,4 +32,4 @@
"_doc_distributedSystemArgs": "String",
"distributedSystemToken": "9922u",
"_doc_distributedSystemToken": "Token that associates distributed group of workers and parameter-server"
-}
+}
4 changes: 3 additions & 1 deletion inputJsonsFiles/Workers/worker_fed_server.json
@@ -14,6 +14,8 @@
"_doc_layer_functions_scaler": " none:1 | MinMax:2 | MeanStd:3 | STD:4 | Log:5 |",
"lossMethod": "2",
"_doc_lossMethod": " SSE:1 | MSE:2 | NSE:3 | MinkowskiE:4 | WSE:5 | CEE:6 |",
"lossArgs:": "",
"_doc_lossArgs": "Arguments to loss function. Regularization: reg=L2, reg=L1, reg=NoRegularization (can be also empty)",
"lr": "0.01",
"_doc_lr": "Positve float",
"epochs": "1",
@@ -30,4 +32,4 @@
"_doc_distributedSystemArgs": "String",
"distributedSystemToken": "9922u",
"_doc_distributedSystemToken": "Token that associates distributed group of workers and parameter-server"
-}
+}
4 changes: 3 additions & 1 deletion inputJsonsFiles/Workers/worker_latest.json
@@ -12,6 +12,8 @@
"_doc_layer_functions_scaler": " none:1 | MinMax:2 | MeanStd:3 | STD:4 | Log:5 |",
"lossMethod": "2",
"_doc_lossMethod": " SSE:1 | MSE:2 | NSE:3 | MinkowskiE:4 | WSE:5 | CEE:6 |",
"lossArgs:": "",
"_doc_lossArgs": "Arguments to loss function. Regularization: reg=L2, reg=L1, reg=NoRegularization (can be also empty)",
"lr": "0.01",
"_doc_lr": "Positve float",
"epochs": "1",
@@ -28,4 +30,4 @@
"_doc_distributedSystemArgs": "String",
"distributedSystemToken": "none",
"_doc_distributedSystemToken": "Token that associates distributed group of workers and parameter-server"
-}
+}
4 changes: 3 additions & 1 deletion inputJsonsFiles/Workers/worker_synt_ori_new.json
@@ -14,6 +14,8 @@
"_doc_layer_functions_scaler": " none:1 | MinMax:2 | MeanStd:3 | STD:4 | Log:5 |",
"lossMethod": "2",
"_doc_lossMethod": " SSE:1 | MSE:2 | NSE:3 | MinkowskiE:4 | WSE:5 | CEE:6 |",
"lossArgs:": "",
"_doc_lossArgs": "Arguments to loss function. Regularization: reg=L2, reg=L1, reg=NoRegularization (can be also empty)",
"lr": "0.001",
"_doc_lr": "Positve float",
"epochs": "1",
@@ -30,4 +32,4 @@
"_doc_distributedSystemArgs": "String",
"distributedSystemToken": "none",
"_doc_distributedSystemToken": "Token that associates distributed group of workers and parameter-server"
-}
+}
4 changes: 3 additions & 1 deletion inputJsonsFiles/Workers/worker_test_synt_1d_2c_1s_4r_4w.json
@@ -12,6 +12,8 @@
"_doc_layer_functions_scaler": " none:1 | MinMax:2 | MeanStd:3 | STD:4 | Log:5 |",
"lossMethod": "2",
"_doc_lossMethod": " SSE:1 | MSE:2 | NSE:3 | MinkowskiE:4 | WSE:5 | CEE:6 |",
"lossArgs:": "",
"_doc_lossArgs": "Arguments to loss function. Regularization: reg=L2, reg=L1, reg=NoRegularization (can be also empty)",
"lr": "0.001",
"_doc_lr": "Positve float",
"epochs": "1",
@@ -28,4 +30,4 @@
"_doc_distributedSystemArgs": "String",
"distributedSystemToken": "none",
"_doc_distributedSystemToken": "Token that associates distributed group of workers and parameter-server"
-}
+}
2 changes: 1 addition & 1 deletion src_py/nerlPlanner/JsonElementWorker.py
@@ -210,7 +210,7 @@ def get_as_dict(self, documentation = True):
(KEY_LOSS_METHOD, self.LossMethod),
(KEY_LOSS_METHOD_DOC, VAL_LOSS_METHOD_DOC),
(KEY_LOSS_ARGS, self.LossArgs),
-(KEY_LOSS_ARGS_DOC, VAL_LOSS_ARGS_DOC)
+(KEY_LOSS_ARGS_DOC, VAL_LOSS_ARGS_DOC),
(KEY_LEARNING_RATE, self.LearningRate),
(KEY_LEARNING_RATE_DOC, VAL_LEARNING_RATE_DOC),
(KEY_EPOCHS, self.Epochs.get_value_str()),
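The JsonElementWorker.py fix is a single trailing comma. Without it, Python parses the next parenthesized pair as a call on the preceding tuple instead of as the next element of the key/value sequence, which fails at runtime. A minimal reproduction with placeholder names (not the repository's KEY_*/VAL_* constants):

    # Missing comma: Python reads ("k1", "v1")("k2", "v2") as a call on a tuple,
    # which raises TypeError when the list is built.
    try:
        broken = [
            ("k1", "v1")
            ("k2", "v2"),
        ]
    except TypeError as err:
        print("missing comma:", err)   # 'tuple' object is not callable

    # With the comma restored, the list holds two separate pairs, as intended.
    fixed = [
        ("k1", "v1"),
        ("k2", "v2"),
    ]
    print(dict(fixed))                 # {'k1': 'v1', 'k2': 'v2'}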
6 changes: 3 additions & 3 deletions src_py/nerlPlanner/WinWorkerDialog.py
@@ -75,7 +75,7 @@ def WinWorkerDialog():
OptimizationArgs = "none"
LossMethodStr = ""
LossMethod = None # None
-LossArgs = "none"
+LossArgs = ""
LearningRate = None
Epochs = "1"
LayersFunctionsList = ""
@@ -229,7 +229,7 @@ def ui_update_all_values(WorkerWindow):
first_element_condition = bool(LayerTypesList[0] != "3")
if worker_parameters_conditions and filepath_condition and first_element_condition:
# Update here when adding new fields to the worker
-newWorker = Worker("new",LayersSizesList, ModelTypeStr, ModelType, ModelArgsStr, OptimizationType, OptimizationArgs , LossMethodStr, LossArgs, LossMethod,
+newWorker = Worker("new",LayersSizesList, ModelTypeStr, ModelType, ModelArgsStr, OptimizationType, OptimizationArgs , LossMethodStr, LossMethod, LossArgs,
LearningRate, Epochs, LayersFunctionsList, LayerTypesList, InfraType, DistributedSystemType, DistributedSystemArgs, DistributedSystemToken)
newWorker.save_as_json(FilePath.as_posix(), WithDocumentation)
sg.popup_auto_close("Successfully Created", keep_on_top=True)
@@ -253,7 +253,7 @@ def ui_update_all_values(WorkerWindow):
loaded_worker_dict = {}
with open(FilePathLoad) as jsonFile:
loaded_worker_dict = json.load(jsonFile)
-(LayersSizesList, ModelTypeStr, ModelType, ModelArgsStr, OptimizationType, OptimizationArgs ,LossMethodStr, LossMethod, LearningRate, Epochs,
+(LayersSizesList, ModelTypeStr, ModelType, ModelArgsStr, OptimizationType, OptimizationArgs ,LossMethodStr, LossMethod, LossArgs, LearningRate, Epochs,
LayersFunctionsList, LayerTypesList, InfraType, DistributedSystemType, DistributedSystemArgs ,DistributedSystemToken) = Worker.load_from_dict(loaded_worker_dict, get_params=True)
ui_update_all_values(WorkerWindow)

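The WinWorkerDialog.py changes are ordering and default-value fixes: the initial LossArgs becomes the empty string, the Worker(...) call passes LossMethod before LossArgs so the arguments line up with the constructor, and the tuple unpacked from Worker.load_from_dict gains a LossArgs slot. With positional signatures this long, a swapped pair goes unnoticed until values land in the wrong fields; keyword arguments make the pairing explicit. A sketch of that defensive style using a stand-in class (not the actual Worker API):

    from dataclasses import dataclass

    @dataclass
    class WorkerConfig:                # stand-in for the real Worker; fields trimmed for brevity
        loss_method: str
        loss_args: str
        learning_rate: str

    # Keyword arguments keep loss_method and loss_args from being swapped silently,
    # the kind of mix-up the positional Worker(...) call above had to fix.
    cfg = WorkerConfig(loss_method="2", loss_args="reg=L2", learning_rate="0.01")
    print(cfg)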
