Mirror of https://github.com/newnius/YAO-agent.git (synced 2025-06-07 22:01:55 +00:00)
update api

This commit is contained in: commit 022212a6e0 (parent 814f252b49)
executor.py (46 lines changed)

@@ -82,36 +82,30 @@ class MyHandler(BaseHTTPRequestHandler):
 			docker_workspace = form.getvalue('workspace')
 			docker_gpus = form.getvalue('gpus')
 			docker_mem_limit = form.getvalue('mem_limit')
-			docker_cpu_limit = int(form.getvalue('cpu_limit'))
+			docker_cpu_limit = form.getvalue('cpu_limit')
 			docker_network = form.getvalue('network')
 
 			try:
-				client = docker.APIClient(base_url='unix://var/run/docker.sock')
-				host_config = client.create_host_config(
-					mem_limit=docker_mem_limit,
-					cpu_shares=docker_cpu_limit * 1024
-				)
-				networking_config = client.create_networking_config(
-					endpoints_config={
-						docker_network: client.create_endpoint_config(
-							aliases=[docker_name],
-						)
-					}
-				)
-
-				container = client.create_container(
-					image=docker_image,
-					command=docker_cmd,
-					hostname=docker_name,
-					detach=True,
-					host_config=host_config,
-					environment={"repo": docker_workspace, "NVIDIA_VISIBLE_DEVICES": docker_gpus},
-					networking_config=networking_config,
-					runtime='nvidia'
-				)
-				client.start(container)
-				msg = {"code": 0, "id": container['Id']}
+				script = " ".join([
+					"docker run",
+					"--gpus '\"device=" + docker_gpus + "\"'",
+					"--detach=True",
+					"--hostname " + docker_name,
+					"--network " + docker_network,
+					"--network-alias " + docker_name,
+					"--memory-reservation " + docker_mem_limit,
+					"--cpus " + docker_cpu_limit,
+					"--env repo=" + docker_workspace,
+					docker_image,
+					docker_cmd
+				])
+
+				client = docker.from_env()
+				container = client.containers.get('yao-agent-helper')
+				exit_code, output = container.exec_run('sh -c \'' + script + '\'')
+				msg = {"code": 0, "id": output.decode('utf-8').rstrip('\n')}
+				if exit_code != 0:
+					msg["code"] = 1
 			except Exception as e:
 				msg = {"code": 1, "error": str(e)}
 
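For context, the updated handler no longer talks to the low-level docker.APIClient; it assembles a `docker run` command line and executes it inside the `yao-agent-helper` container, using the detached run's stdout (the new container's ID) as the job ID. The sketch below restates that flow as a standalone function; the name `launch_via_helper` is illustrative only, and it assumes the Docker SDK for Python >= 3.0 (where `exec_run` returns an `(exit_code, output)` pair), Docker 19.03+ on the host for the `--gpus` flag, and a running `yao-agent-helper` container that can reach the host's Docker CLI and socket.

import docker


def launch_via_helper(image, cmd, name, network, gpus, mem_limit, cpu_limit, workspace):
	# Hypothetical wrapper mirroring the committed executor.py change.
	# All resource arguments are passed as strings, e.g. gpus="0,1",
	# mem_limit="4g", cpu_limit="2".
	script = " ".join([
		"docker run",
		"--gpus '\"device=" + gpus + "\"'",
		"--detach=True",
		"--hostname " + name,
		"--network " + network,
		"--network-alias " + name,
		"--memory-reservation " + mem_limit,
		"--cpus " + cpu_limit,
		"--env repo=" + workspace,
		image,
		cmd,
	])
	client = docker.from_env()
	helper = client.containers.get('yao-agent-helper')
	# A detached `docker run` prints the new container's ID on stdout.
	exit_code, output = helper.exec_run("sh -c '" + script + "'")
	if exit_code != 0:
		raise RuntimeError(output.decode('utf-8'))
	return output.decode('utf-8').rstrip('\n')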
test.py (23 lines changed)

@@ -4,8 +4,7 @@ import docker
 def run():
 	client = docker.from_env()
 	try:
-		print(client.containers.run(image="alpine", command="nvid", environment={"KEY": "value"}))
-		# print(client.containers.run(image="nvidia/cuda:9.0-base", command="nvidia-smi", environment={"KEY": "value"}, runtime="nvidia"))
+		print(client.containers.run(image="alpine", command="pwd", environment={"KEY": "value"}))
 	except Exception as e:
 		print(e.__class__.__name__, e)
 
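Side note on the run() tweak: `client.containers.run()` without `detach=True` blocks until the command exits and returns its stdout as bytes, so replacing the (presumably mistyped) `nvid` with `pwd` lets the call succeed instead of raising. A minimal check, assuming the `alpine` image is available locally or can be pulled:

import docker

client = docker.from_env()
# Run `pwd` in a throwaway alpine container and clean it up afterwards.
out = client.containers.run(image="alpine", command="pwd", remove=True)
print(out)  # expected: b'/\n' (alpine's default working directory)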
@@ -78,9 +77,9 @@ def create_container():
 	)
 	networking_config = client.create_networking_config(
 		endpoints_config={
-			'yao-net-1201': client.create_endpoint_config(
-				aliases=['node1'],
-			)
+			# 'yao-net-1201': client.create_endpoint_config(
+			# 	aliases=['node1'],
+			# )
 		}
 	)
 
@@ -95,10 +94,22 @@ def create_container():
 		runtime='nvidia'
 	)
 	client.start(container)
+	print(container)
+
+
+def exec_run():
+	client = docker.from_env()
+	container = client.containers.get('yao-agent-helper')
+	exit_code, output = container.exec_run(cmd="sh -c 'docker run --gpus all --detach=True tensorflow/tensorflow:1.14.0-gpu nvidia-smi'")
+	if exit_code == 0:
+		print(output.decode('utf-8').rstrip('\n'))
+
+
 # create_network()
 # list_networks()
 
 # remove_network()
-get_status('af121babda9b')
+# get_status('af121babda9b')
+# exec_run()
+# run()
+create_container()
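The new exec_run() test drives the same helper-container path as executor.py, but because the inner `docker run` uses `--detach=True`, the only output it prints is the launched container's ID. A rough variation that returns the `nvidia-smi` table directly is sketched below; it assumes the NVIDIA Container Toolkit is installed on the host, Docker 19.03+ for `--gpus`, and a running `yao-agent-helper` container.

import docker

client = docker.from_env()
helper = client.containers.get('yao-agent-helper')
# Without --detach, `docker run` streams the container's stdout back,
# so `output` holds the nvidia-smi table; --rm removes the container on exit.
exit_code, output = helper.exec_run(
	cmd="sh -c 'docker run --rm --gpus all tensorflow/tensorflow:1.14.0-gpu nvidia-smi'"
)
print(exit_code)
print(output.decode('utf-8'))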