1
0
mirror of https://github.com/newnius/YAO-scheduler.git synced 2025-06-07 14:21:55 +00:00
This commit is contained in:
Newnius 2020-06-30 16:44:56 +08:00
parent 0e4266ea57
commit fc390837ff

View File

@@ -743,6 +743,7 @@ func (pool *ResourcePool) doAcquireResource(job Job) []NodeStatus {
if pool.TotalGPU == 0 {
return []NodeStatus{}
}
var res []NodeStatus
loadRatio := float64(pool.UsingGPU) / float64(pool.TotalGPU)
/* first, choose sharable GPUs */
@@ -794,30 +795,29 @@ func (pool *ResourcePool) doAcquireResource(job Job) []NodeStatus {
if len(candidates) > 0 {
node := candidates[0]
res := NodeStatus{}
res.ClientID = node.ClientID
res.ClientHost = node.ClientHost
res.NumCPU = task.NumberCPU
res.MemTotal = task.Memory
res.Status = availables[node.ClientHost][0:task.NumberGPU]
res = append(res, NodeStatus{})
res[0].ClientID = node.ClientID
res[0].ClientHost = node.ClientHost
res[0].NumCPU = task.NumberCPU
res[0].MemTotal = task.Memory
res[0].Status = availables[node.ClientHost][0:task.NumberGPU]
for i := range res.Status {
for i := range res[0].Status {
for j := range node.Status {
if res.Status[i].UUID == node.Status[j].UUID {
if res[0].Status[i].UUID == node.Status[j].UUID {
if node.Status[j].MemoryAllocated == 0 {
pool.UsingGPUMu.Lock()
pool.UsingGPU ++
pool.UsingGPUMu.Unlock()
}
node.Status[j].MemoryAllocated += task.MemoryGPU
res.Status[i].MemoryTotal = task.MemoryGPU
res[0].Status[i].MemoryTotal = task.MemoryGPU
}
}
}
for _, t := range res.Status {
for _, t := range res[0].Status {
pool.attach(t.UUID, job)
}
return []NodeStatus{res}
}
}
//log.Info(candidates)
@@ -904,30 +904,29 @@ func (pool *ResourcePool) doAcquireResource(job Job) []NodeStatus {
//log.Info(candidates)
if len(candidates) > 0 {
node := candidates[0]
res := NodeStatus{}
res.ClientID = node.ClientID
res.ClientHost = node.ClientHost
res.NumCPU = task.NumberCPU
res.MemTotal = task.Memory
res.Status = availables[node.ClientHost][0:task.NumberGPU]
res = append(res, NodeStatus{})
res[0].ClientID = node.ClientID
res[0].ClientHost = node.ClientHost
res[0].NumCPU = task.NumberCPU
res[0].MemTotal = task.Memory
res[0].Status = availables[node.ClientHost][0:task.NumberGPU]
for i := range res.Status {
for i := range res[0].Status {
for j := range node.Status {
if res.Status[i].UUID == node.Status[j].UUID {
if res[0].Status[i].UUID == node.Status[j].UUID {
if node.Status[j].MemoryAllocated == 0 {
pool.UsingGPUMu.Lock()
pool.UsingGPU ++
pool.UsingGPUMu.Unlock()
}
node.Status[j].MemoryAllocated += task.MemoryGPU
res.Status[i].MemoryTotal = task.MemoryGPU
res[0].Status[i].MemoryTotal = task.MemoryGPU
}
}
}
for _, t := range res.Status {
for _, t := range res[0].Status {
pool.attach(t.UUID, job)
}
return []NodeStatus{res}
}
}
}
@@ -939,7 +938,7 @@ func (pool *ResourcePool) doAcquireResource(job Job) []NodeStatus {
/* assign */
var ress []NodeStatus
if len(candidates) > 0 {
if len(candidates) > 0 && len(res) == 0 {
var nodesT []NodeStatus
for _, node := range candidates {
nodesT = append(nodesT, node.Copy())