Minor refactor to scale-up orchestrator for more re-usability #7649
base: master
In `ScaleUpOrchestrator.ScaleUp`, the async node group initializer is now built at the call site and passed into `CreateNodeGroup`:

```diff
@@ -222,7 +222,9 @@ func (o *ScaleUpOrchestrator) ScaleUp(
 		return buildNoOptionsAvailableStatus(markedEquivalenceGroups, skippedNodeGroups, nodeGroups), nil
 	}
 	var scaleUpStatus *status.ScaleUpStatus
-	createNodeGroupResults, scaleUpStatus, aErr = o.CreateNodeGroup(bestOption, nodeInfos, schedulablePodGroups, podEquivalenceGroups, daemonSets, allOrNothing)
+	oldId := bestOption.NodeGroup.Id()
+	initializer := NewAsyncNodeGroupInitializer(bestOption.NodeGroup, nodeInfos[oldId], o.scaleUpExecutor, o.taintConfig, daemonSets, o.processors.ScaleUpStatusProcessor, o.autoscalingContext, allOrNothing)
+	createNodeGroupResults, scaleUpStatus, aErr = o.CreateNodeGroup(bestOption, nodeInfos, schedulablePodGroups, podEquivalenceGroups, daemonSets, initializer)
 	if aErr != nil {
 		return scaleUpStatus, aErr
 	}
```

Review comment (on the new initializer construction):

> Creation of the initializer used to be flag guarded and here it is no longer the case - is that intentional? If not, can you keep the flag guard?

Author reply:

> It may not be ideal, but I preferred this over the alternatives: …
>
> One obvious option that might make more sense (PLMK WDYT) is to split off orchestrator's …
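For discussion only, here is one way the flag guard the reviewer asks about could be kept at the call site; it is not part of this PR. The sketch assumes `nodegroups.AsyncNodeGroupInitializer` is an interface type, so `nil` is a valid zero value, and that `CreateNodeGroup` only touches the initializer on the `AsyncNodeGroupsEnabled` path, which the second hunk below suggests.

```go
// Hypothetical flag-guarded variant of the new call site (not in this PR).
// Assumes a nil nodegroups.AsyncNodeGroupInitializer is acceptable whenever
// o.autoscalingContext.AsyncNodeGroupsEnabled is false.
var initializer nodegroups.AsyncNodeGroupInitializer
if o.autoscalingContext.AsyncNodeGroupsEnabled {
	oldId := bestOption.NodeGroup.Id()
	initializer = NewAsyncNodeGroupInitializer(bestOption.NodeGroup, nodeInfos[oldId],
		o.scaleUpExecutor, o.taintConfig, daemonSets,
		o.processors.ScaleUpStatusProcessor, o.autoscalingContext, allOrNothing)
}
createNodeGroupResults, scaleUpStatus, aErr = o.CreateNodeGroup(bestOption, nodeInfos,
	schedulablePodGroups, podEquivalenceGroups, daemonSets, initializer)
```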
In `CreateNodeGroup`, the `allOrNothing` parameter is replaced by the injected initializer, and the flag-guarded construction inside the function is removed:

```diff
@@ -501,15 +503,14 @@ func (o *ScaleUpOrchestrator) CreateNodeGroup(
 	schedulablePodGroups map[string][]estimator.PodEquivalenceGroup,
 	podEquivalenceGroups []*equivalence.PodGroup,
 	daemonSets []*appsv1.DaemonSet,
-	allOrNothing bool,
+	initializer nodegroups.AsyncNodeGroupInitializer,
 ) ([]nodegroups.CreateNodeGroupResult, *status.ScaleUpStatus, errors.AutoscalerError) {
 	createNodeGroupResults := make([]nodegroups.CreateNodeGroupResult, 0)

-	oldId := initialOption.NodeGroup.Id()
 	var createNodeGroupResult nodegroups.CreateNodeGroupResult
 	var aErr errors.AutoscalerError
 	if o.autoscalingContext.AsyncNodeGroupsEnabled {
-		initializer := newAsyncNodeGroupInitializer(initialOption.NodeGroup, nodeInfos[oldId], o.scaleUpExecutor, o.taintConfig, daemonSets, o.processors.ScaleUpStatusProcessor, o.autoscalingContext, allOrNothing)
 		createNodeGroupResult, aErr = o.processors.NodeGroupManager.CreateNodeGroupAsync(o.autoscalingContext, initialOption.NodeGroup, initializer)
 	} else {
 		createNodeGroupResult, aErr = o.processors.NodeGroupManager.CreateNodeGroup(o.autoscalingContext, initialOption.NodeGroup)
```
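The re-usability gain comes from the dependency injection: `CreateNodeGroup` no longer constructs the orchestrator's own initializer, so callers can supply any `nodegroups.AsyncNodeGroupInitializer`. Below is a minimal sketch of a decorator built on the now-exported `NewAsyncNodeGroupInitializer`; the `InitializeNodeGroup` method and its `AsyncNodeGroupCreationResult` parameter type are assumptions about the interface, not something this diff shows.

```go
// Hypothetical decorator, for illustration only. Embedding the (assumed)
// interface keeps the wrapped initializer's behavior for every other method
// and overrides just one, whose signature here is an assumption about the
// nodegroups package.
type loggingInitializer struct {
	nodegroups.AsyncNodeGroupInitializer
}

func (l loggingInitializer) InitializeNodeGroup(result nodegroups.AsyncNodeGroupCreationResult) {
	klog.V(2).Info("async node group created, running initial scale-up")
	l.AsyncNodeGroupInitializer.InitializeNodeGroup(result)
}
```

With the old signature this would have required changing `CreateNodeGroup` itself; with the new one, such a decorator can simply be wrapped around the initializer at the `ScaleUp` call site and passed in.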
Another review comment:

> Wouldn't it make more sense as a part of the `errors` package?
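The code this comment attaches to is not visible in this excerpt, but as a general illustration of the suggestion: cluster-autoscaler's `utils/errors` package already exposes `NewAutoscalerError` and typed `AutoscalerErrorType` values, so an error-wrapping helper would sit naturally beside them. The helper below is hypothetical, sketched only to show the shape.

```go
// Hypothetical helper as it might appear inside cluster-autoscaler's
// utils/errors package. NewAutoscalerError and AutoscalerErrorType are real
// symbols there; this wrapper itself is an illustration, not existing API.
func WrapAsAutoscalerError(errType AutoscalerErrorType, err error) AutoscalerError {
	if err == nil {
		return nil
	}
	// Preserve an already-typed error instead of double-wrapping it.
	if typed, ok := err.(AutoscalerError); ok {
		return typed
	}
	return NewAutoscalerError(errType, "%v", err)
}
```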