From 71bd27f530f6b9ca29d110b0f33a498995886911 Mon Sep 17 00:00:00 2001 From: Newnius Date: Tue, 12 Mar 2019 16:28:04 +0800 Subject: [PATCH] update --- .gitignore | 7 +++- Dockerfile | 17 ++++++++ bootstrap.sh | 6 +++ executor.py | 22 +++++++++++ server.py | 108 +++++++++++++++++++++++++++++++++++++++++++++++++++ yao-agent.py | 13 ++++++- 6 files changed, 169 insertions(+), 4 deletions(-) create mode 100644 Dockerfile create mode 100755 bootstrap.sh create mode 100644 executor.py create mode 100644 server.py diff --git a/.gitignore b/.gitignore index 5180b48..8366402 100644 --- a/.gitignore +++ b/.gitignore @@ -4,12 +4,15 @@ status.xml -# IDEA - +# IDEA IntelliJ *.iml .idea/ +# Tmp files +*.swp +*~ + # Byte-compiled / optimized / DLL files diff --git a/Dockerfile b/Dockerfile new file mode 100644 index 0000000..86fd624 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,17 @@ +FROM nvidia/cuda:9.0-base + +MAINTAINER Newnius + +RUN apt update && \ + apt install -y python3 python3-pip + +RUN pip3 install docker kafka + +ADD bootstrap.sh /etc/bootstrap.sh + +ADD yao-agent.py /root/yao-agent.py +ADD server.py /root/server.py + +WORKDIR /root + +CMD ["/etc/bootstrap.sh"] \ No newline at end of file diff --git a/bootstrap.sh b/bootstrap.sh new file mode 100755 index 0000000..7a61e58 --- /dev/null +++ b/bootstrap.sh @@ -0,0 +1,6 @@ +#!/usr/bin/env bash + + +python3 /root/yao-agent.py & + +python3 /root/server.py \ No newline at end of file diff --git a/executor.py b/executor.py new file mode 100644 index 0000000..65e5f15 --- /dev/null +++ b/executor.py @@ -0,0 +1,22 @@ +import docker + + +def run(): + client = docker.from_env() + #print(client.containers.run(image="alpine", command="echo 'Hello World'", environment={"KEY": "value"})) + print(client.containers.run(image="nvidia/cuda:9.0-base", command="nvidia-smi", environment={"KEY": "value"}, runtime="nvidia")) + + +def run_in_background(): + client = docker.from_env() + container = client.containers.run("alpine", ["echo", "hello", 
"world"], detach=True) + print(container.id) + + +def list_containers(): + client = docker.from_env() + for container in client.containers.list(): + print(container.id) + + +run() diff --git a/server.py b/server.py new file mode 100644 index 0000000..d91d045 --- /dev/null +++ b/server.py @@ -0,0 +1,108 @@ +#!/usr/bin/python +from http.server import BaseHTTPRequestHandler, HTTPServer +import cgi +import docker +import json +from urllib import parse + +PORT_NUMBER = 8000 + + +# This class will handles any incoming request from +# the browser +class MyHandler(BaseHTTPRequestHandler): + # Handler for the GET requests + def do_GET(self): + req = parse.urlparse(self.path) + query = parse.parse_qs(req.query) + + if req.path == "/ping": + # Open the static file requested and send it + self.send_response(200) + self.send_header('Content-type', 'application/json') + self.end_headers() + self.wfile.write(bytes("pong", "utf-8")) + + elif req.path == "/logs": + id = query['id'][0] + client = docker.from_env() + container = client.containers.get(id) + + msg = {'code': 0, 'logs': container.logs().decode()} + self.send_response(200) + self.send_header('Content-type', 'application/json') + self.end_headers() + self.wfile.write(bytes(json.dumps(msg), "utf-8")) + + else: + self.send_error(404, 'File Not Found: %s' % self.path) + + # Handler for the POST requests + def do_POST(self): + if self.path == "/create": + form = cgi.FieldStorage( + fp=self.rfile, + headers=self.headers, + environ={ + 'REQUEST_METHOD': 'POST', + 'CONTENT_TYPE': self.headers['Content-Type'], + }) + docker_image = form["image"].value + docker_cmd = form["cmd"].value + + print(docker_image) + print(docker_cmd) + + client = docker.from_env() + container = client.containers.run( + image=docker_image, + command=docker_cmd, + environment={"key": "value"}, + runtime="nvidia", + detach=True + ) + + msg = {"code": 0, "id": container.id} + + self.send_response(200) + self.send_header('Content-type', 'application/json') + 
self.end_headers() + self.wfile.write(bytes(json.dumps(msg), "utf-8")) + + elif self.path == "/stop": + form = cgi.FieldStorage( + fp=self.rfile, + headers=self.headers, + environ={ + 'REQUEST_METHOD': 'POST', + 'CONTENT_TYPE': self.headers['Content-Type'], + }) + id = form["id"].value + + client = docker.from_env() + container = client.containers.get(id) + container.stop() + msg = {"code": 0} + + self.send_response(200) + self.send_header('Content-type', 'application/json') + self.end_headers() + self.wfile.write(bytes(json.dumps(msg), "utf-8")) + else: + self.send_error(404, 'File Not Found: %s' % self.path) + + +try: + # Create a web server and define the handler to manage the + # incoming request + server = HTTPServer(('', PORT_NUMBER), MyHandler) + print('Started httpserver on port ', PORT_NUMBER) + + # Wait forever for incoming http requests + server.serve_forever() + +except KeyboardInterrupt: + print('^C received, shutting down the web server') + + +server.socket.close() diff --git a/yao-agent.py b/yao-agent.py index 1cd1d79..95edac6 100644 --- a/yao-agent.py +++ b/yao-agent.py @@ -6,7 +6,6 @@ from xml.dom.minidom import parse import xml.dom.minidom from kafka import KafkaProducer - ClientID = os.getenv('ClientID', 1) KafkaBrokers = os.getenv('KafkaBrokers', 'localhost:9092').split(',') @@ -18,7 +17,7 @@ def main(): status, msg_gpu = execute(['nvidia-smi', '-q', '-x', '-f', 'status.xml']) if not status: print("execute failed, ", msg_gpu) - continue + continue report_msg() time.sleep(interval) except Exception as e: @@ -55,6 +54,16 @@ def report_msg(): 'temperature_gpu': gpu.getElementsByTagName('temperature')[0].getElementsByTagName('gpu_temp')[0].childNodes[0].data, 'power_draw': gpu.getElementsByTagName('power_readings')[0].getElementsByTagName('power_draw')[0].childNodes[0].data } + + stat['fan_speed'] = int(float(stat['fan_speed'].split(' ')[0])) + stat['memory_total'] = int(float(stat['memory_total'].split(' ')[0])) + stat['memory_free'] = 
int(float(stat['memory_free'].split(' ')[0])) + stat['memory_used'] = int(float(stat['memory_used'].split(' ')[0])) + stat['utilization_gpu'] = int(float(stat['utilization_gpu'].split(' ')[0])) + stat['utilization_mem'] = int(float(stat['utilization_mem'].split(' ')[0])) + stat['temperature_gpu'] = int(float(stat['temperature_gpu'].split(' ')[0])) + stat['power_draw'] = int(float(stat['power_draw'].split(' ')[0])) + stats.append(stat) post_fields = {'id': ClientID, 'status': stats}