Jelajahi Sumber

new disk format

raylu 11 tahun lalu
induk
melakukan
f90d5b47ef
3 mengubah file dengan 83 tambahan dan 62 penghapusan
  1. +50 −35
      api/fileio.py
  2. +6 −4
      api/server.py
  3. +27 −23
      client/sysvitals_client

+ 50 - 35
api/fileio.py

@@ -1,34 +1,43 @@
 import struct
 
-formats = {
-	'cpu': 'f',
-	'mem': 'q',
-	'net': 'q',
-	'disk': 'q',
-}
+fields = [
+	('cpu', 'f', [
+		'user', 'iowait', 'system', 'nice',
+		'guest', 'guest_nice',
+		'irq', 'softirq', 'steal', 'idle',
+	]),
+	('mem', 'q', ['total', 'used', 'buffers', 'cached']),
+	('net', 'q', [
+		'bytes_recv', 'bytes_sent',
+		'dropin', 'dropout', 'errin', 'errout',
+	]),
+	('disk', 'q', ['total', 'used']),
+]
 
 def read_stats(f):
-	buf = f.read()
 	stats = {}
-	index = 0
-	while index < len(buf):
-		# read the key
-		key_size = ord(buf[index])
-		fmt = '%dp' % (key_size + 1)
-		data = struct.unpack(fmt, buf[index:index+key_size+1])
-		key = data[0]
+	for stat_group, format_code, subfields in fields:
+		field_data = {}
+		fmt = '1440' + format_code
+		if stat_group != 'disk':
+			size = struct.calcsize(fmt)
+			for field in subfields:
+				field_data[field] = struct.unpack(fmt, f.read(size))
+		else:
+			buf = f.read() # disk is last, so we can read everything
+			index = 0
+			while index < len(buf):
+				mountpoint_size = ord(buf[index])
+				disk_fmt = '%dp %s %s' % (mountpoint_size + 1, fmt, fmt)
+				size = struct.calcsize(disk_fmt)
+				data = struct.unpack(disk_fmt, buf[index:index+size])
 
-		# get the format_code
-		split = key.split('.')
-		stat_group = split[0]
-		format_code = formats[stat_group]
-
-		# read the data
-		fmt = '%dp 1440%s' % (key_size + 1, format_code)
-		size = struct.calcsize(fmt)
-		data = struct.unpack(fmt, buf[index:index+size])
-		dict_insert(stats, split, data[1:])
-		index += size
+				mountpoint = data[0]
+				total = data[1:1441]
+				used = data[1441:]
+				field_data[mountpoint] = {'used': used, 'total': total}
+				index += size
+		stats[stat_group] = field_data
 	return stats
 
 def dict_insert(d, split, value):
@@ -39,13 +48,19 @@ def dict_insert(d, split, value):
 		d[split[0]] = value
 
 def write_datum(f, data):
-	for stat_group, stats in data.items():
-		if stat_group == 'client_id':
-			continue
-		format_code = formats[stat_group]
-		for stat, datum in stats.items():
-			key = '%s.%s' % (stat_group, stat)
-			fmt = '%dp 1440%s' % (len(key) + 1, format_code)
-			array = [-1] * 1440
-			array[0] = datum
-			f.write(struct.pack(fmt, key.encode('utf-8'), *array))
+	for stat_group, format_code, subfields in fields:
+		fmt = '1440' + format_code
+		if stat_group == 'disk':
+			for mountpoint, disk_data in data['disk'].items():
+				mountpoint = mountpoint.encode('utf-8')
+				disk_fmt = '%dp %s %s' % (len(mountpoint) + 1, fmt, fmt)
+				total = [-1] * 1440
+				total[0] = disk_data['total']
+				used = [-1] * 1440
+				used[0] = disk_data['used']
+				f.write(struct.pack(disk_fmt, mountpoint, *(total + used)))
+		else:
+			for field in subfields:
+				array = [-1] * 1440
+				array[0] = data[stat_group][field]
+				f.write(struct.pack(fmt, *array))

+ 6 - 4
api/server.py

@@ -48,23 +48,25 @@ def application(environ, start_response):
 
 def get_data(split, environ):
 	group = int(split[1])
-	client_id = int(split[3])
-	data_path = path.join(DATA_DIR, str(group), str(client_id))
+	server_id = int(split[3])
+	data_path = path.join(DATA_DIR, str(group), str(server_id))
 	with open(data_path, 'r') as f:
 		stats = fileio.read_stats(f)
 	return json.dumps(stats)
 
 def post_datum(split, environ):
 	group = int(split[1])
+	server_id = int(split[3])
 	body = json.load(environ['wsgi.input'])
-	client_id = body['client_id']
+
 	group_dir = path.join(DATA_DIR, str(group))
 	try:
 		os.makedirs(group_dir)
 	except OSError as e:
 		if e.errno != errno.EEXIST:
 			raise
-	with open(path.join(group_dir, str(client_id)), 'w') as f:
+
+	with open(path.join(group_dir, str(server_id)), 'w') as f:
 		fileio.write_datum(f, body)
 	return '{"status": "ok"}'
 

+ 27 - 23
client/sysvitals_client

@@ -1,28 +1,32 @@
 #!/usr/bin/python
 
-from collections import namedtuple
-import psutil
 import json
+import psutil
+import urllib2
 
-def serialize(stats, partitions):
-	data = {type(stat).__name__: stat._asdict() for stat in stats}
-	data['partitions'] = {p[0]: p[1]._asdict() for p in partitions}
-	return json.dumps(data)
-
-def get_stats():
-	stats = [
-		psutil.cpu_times(),
-		psutil.virtual_memory(),
-		psutil.net_io_counters(),
-	]
-	partitions = []
-	for partition in psutil.disk_partitions():
-		partitions.append((partition.mountpoint, psutil.disk_usage(partition.mountpoint)))
-	return stats, partitions
-
-def post_stats():
-	stats, partitions = get_stats()
-	json_data = serialize(stats, partitions)
-	return json_data
+cpu = psutil.cpu_times()
+mem = psutil.virtual_memory()
+net = psutil.net_io_counters()
+datum = {
+	'cpu': cpu._asdict(),
+	'mem': {
+		'total': mem.total,
+		'used': mem.used,
+		'buffers': mem.buffers,
+		'cached': mem.cached,
+	},
+	'net': net._asdict(),
+	'disk': {}
+}
+for partition in psutil.disk_partitions():
+	usage = psutil.disk_usage(partition.mountpoint)
+	datum['disk'][partition.mountpoint] = {
+		'total': usage.total,
+		'used': usage.used,
+	}
 
-print(post_stats())
+API_SERVER = 'http://localhost:8892'
+GROUP_ID = 1
+SERVER_ID = 1
+url = '%s/v1/%d/datum/%d' % (API_SERVER, GROUP_ID, SERVER_ID)
+urllib2.urlopen(url, json.dumps(datum))