@@ -91,8 +91,9 @@ type Manager struct {

 	tasksToDeploy chan<- *ResourceOffersDeploymentRequest

-	reviveOffersTrg chan struct{}
-	cq              *controlcommands.CommandQueue
+	reviveOffersTrg  chan struct{}
+	reviveOffersDone chan struct{}
+	cq               *controlcommands.CommandQueue

 	tasksLaunched int
 	tasksFinished int
@@ -141,6 +142,7 @@ func NewManager(shutdown func(), internalEventCh chan<- event.Event) (taskman *M
 	taskman.cq = taskman.schedulerState.commandqueue
 	taskman.tasksToDeploy = taskman.schedulerState.tasksToDeploy
 	taskman.reviveOffersTrg = taskman.schedulerState.reviveOffersTrg
+	taskman.reviveOffersDone = taskman.schedulerState.reviveOffersDone
 	taskman.ackKilledTasks = newAcks()

 	schedState.setupCli()
@@ -156,7 +158,8 @@ func (m *Manager) newTaskForMesosOffer(
 	offer *mesos.Offer,
 	descriptor *Descriptor,
 	localBindMap channel.BindMap,
-	executorId mesos.ExecutorID) (t *Task) {
+	executorId mesos.ExecutorID,
+) (t *Task) {
 	newId := uid.New().String()
 	t = &Task{
 		name: fmt.Sprintf("%s#%s", descriptor.TaskClassName, newId),
@@ -197,8 +200,8 @@ func getTaskClassList(taskClassesRequired []string) (taskClassList []*taskclass.
 		if err != nil {
 			return
 		}
-		repo := repoManager.GetAllRepos()[tempRepo.GetIdentifier()] //get IRepo pointer from RepoManager
-		if repo == nil { //should never end up here
+		repo := repoManager.GetAllRepos()[tempRepo.GetIdentifier()] // get IRepo pointer from RepoManager
+		if repo == nil { // should never end up here
 			return nil, errors.New("getTaskClassList: repo not found for " + taskClass)
 		}

@@ -223,7 +226,6 @@ func getTaskClassList(taskClassesRequired []string) (taskClassList []*taskclass.
 		taskInfo := strings.Split(taskPath, "/tasks/")
 		if len(taskInfo) == 1 {
 			taskFilename = taskInfo[0]
-
 		} else {
 			taskFilename = taskInfo[1]
 		}
@@ -280,7 +282,7 @@ func (m *Manager) removeInactiveClasses() {
 	return
 }

-func (m *Manager) RemoveReposClasses(repoPath string) { //Currently unused
+func (m *Manager) RemoveReposClasses(repoPath string) { // Currently unused
 	utils.EnsureTrailingSlash(&repoPath)

 	_ = m.classes.Do(func(classMap *map[string]*taskclass.Class) error {
@@ -327,7 +329,6 @@ func (m *Manager) RefreshClasses(taskClassesRequired []string) (err error) {
 }

 func (m *Manager) acquireTasks(envId uid.ID, taskDescriptors Descriptors) (err error) {
-
 	/*
 		Here's what's gonna happen:
 		1) check if any tasks are already in Roster, whether they are already locked
@@ -516,7 +517,7 @@ func (m *Manager) acquireTasks(envId uid.ID, taskDescriptors Descriptors) (err e
 	timeReviveOffers := time.Now()
 	timeDeployMu := time.Now()
 	m.reviveOffersTrg <- struct{}{} // signal scheduler to revive offers
-	<-m.reviveOffersTrg // we only continue when it's done
+	<-m.reviveOffersDone // we only continue when it's done
 	utils.TimeTrack(timeReviveOffers, "acquireTasks: revive offers",
 		log.WithField("tasksToRun", len(tasksToRun)).
 			WithField("partition", envId))
@@ -597,7 +598,7 @@ func (m *Manager) acquireTasks(envId uid.ID, taskDescriptors Descriptors) (err e
 	// can't lock some of them, so we must roll back and keep them
 	// unlocked in the roster.
 	var deployedTaskIds []string
-	for taskPtr, _ := range deployedTasks {
+	for taskPtr := range deployedTasks {
 		taskPtr.SetParent(nil)
 		deployedTaskIds = append(deployedTaskIds, taskPtr.taskId)
 	}
@@ -612,11 +613,11 @@ func (m *Manager) acquireTasks(envId uid.ID, taskDescriptors Descriptors) (err e
 	}

 	// Finally, we write to the roster. Point of no return!
-	for taskPtr, _ := range deployedTasks {
+	for taskPtr := range deployedTasks {
 		m.roster.append(taskPtr)
 	}
 	if deploymentSuccess {
-		for taskPtr, _ := range deployedTasks {
+		for taskPtr := range deployedTasks {
 			taskPtr.GetParent().SetTask(taskPtr)
 		}
 		for taskPtr, descriptor := range tasksAlreadyRunning {
@@ -629,7 +630,6 @@ func (m *Manager) acquireTasks(envId uid.ID, taskDescriptors Descriptors) (err e
 }

 func (m *Manager) releaseTasks(envId uid.ID, tasks Tasks) error {
-
 	taskReleaseErrors := make(map[string]error)
 	taskIdsReleased := make([]string, 0)

@@ -686,7 +686,7 @@ func (m *Manager) configureTasks(envId uid.ID, tasks Tasks) error {
 		taskPath := task.GetParentRolePath()
 		for inbChName, endpoint := range task.GetLocalBindMap() {
 			var bindMapKey string
-			if strings.HasPrefix(inbChName, "::") { //global channel alias
+			if strings.HasPrefix(inbChName, "::") { // global channel alias
 				bindMapKey = inbChName

 				// deduplication
@@ -785,7 +785,6 @@ func (m *Manager) configureTasks(envId uid.ID, tasks Tasks) error {
 func (m *Manager) transitionTasks(envId uid.ID, tasks Tasks, src string, event string, dest string, commonArgs controlcommands.PropertyMap) error {
 	notify := make(chan controlcommands.MesosCommandResponse)
 	receivers, err := tasks.GetMesosCommandTargets()
-
 	if err != nil {
 		return err
 	}
@@ -870,7 +869,6 @@ func (m *Manager) TriggerHooks(envId uid.ID, tasks Tasks) error {

 	notify := make(chan controlcommands.MesosCommandResponse)
 	receivers, err := tasks.GetMesosCommandTargets()
-
 	if err != nil {
 		return err
 	}
@@ -935,7 +933,6 @@ func (m *Manager) GetTask(id string) *Task {
 }

 func (m *Manager) updateTaskState(taskId string, state string) {
-
 	taskPtr := m.roster.getByTaskId(taskId)
 	if taskPtr == nil {
 		log.WithField("taskId", taskId).
@@ -989,7 +986,7 @@ func (m *Manager) updateTaskStatus(status *mesos.TaskStatus) {
 		}
 		if ack, ok := m.ackKilledTasks.getValue(taskId); ok {
 			ack <- struct{}{}
-			//close(ack) // It can even be left open?
+			// close(ack) // It can even be left open?
 		}

 		return
@@ -1030,7 +1027,6 @@ func (m *Manager) updateTaskStatus(status *mesos.TaskStatus) {

 // Kill all tasks outside an environment (all unlocked tasks)
 func (m *Manager) Cleanup() (killed Tasks, running Tasks, err error) {
-
 	toKill := m.roster.filtered(func(t *Task) bool {
 		return !t.IsLocked()
 	})
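
The functional core of this change is the second channel: previously `acquireTasks` sent a trigger on `reviveOffersTrg` and then received on the same unbuffered channel to wait for completion, so a concurrent caller's trigger send could satisfy another caller's wait. A dedicated `reviveOffersDone` channel makes the handshake unambiguous. Below is a minimal, self-contained sketch of the pattern under those assumptions; the scheduler goroutine is a stand-in for the real `schedulerState` loop, not the actual O² scheduler code.

```go
package main

import (
	"fmt"
	"time"
)

func main() {
	// Unbuffered channel pair mirroring the Manager fields in this commit.
	reviveOffersTrg := make(chan struct{})
	reviveOffersDone := make(chan struct{})

	// Stand-in for the scheduler goroutine: each trigger starts one revive
	// cycle, and completion is reported on the dedicated done channel.
	go func() {
		for range reviveOffersTrg {
			time.Sleep(100 * time.Millisecond) // placeholder for reviving Mesos offers
			reviveOffersDone <- struct{}{}
		}
	}()

	// Caller side, as in acquireTasks:
	reviveOffersTrg <- struct{}{} // signal scheduler to revive offers
	<-reviveOffersDone            // we only continue when it's done
	fmt.Println("offers revived, continuing with deployment")
}
```

Keeping both channels unbuffered makes each send a rendezvous: the caller cannot proceed until the scheduler has actually picked up the trigger, and cannot mistake its own trigger for a completion signal.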