Mirror of https://github.com/kellyjonbrazil/jc.git (synced 2025-07-13 01:20:24 +02:00)

convert headers to lowercase
@@ -6,6 +6,7 @@ jc changelog
 - Add history parser
 - Flatten env parser output
 - Remove problematic characters from key names in: df, free, history, lsblk, lsof, and w
+- Where possible, lowercase all keys (except cases like env where the key is the variable name)
 
 20191023 v0.9.1
 - Add jobs parser
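As a quick illustration of the new changelog entry (a standalone sketch, not code from the repo; the header strings are just examples taken from the hunks below):

# Headers as they appear in command output (left) and the JSON key
# names the parsers emit after this change (right).
renames = {'use%': 'use_percent',    # df
           'maj:min': 'maj_min',     # lsblk
           'size/off': 'size_off',   # lsof
           'login@': 'login_at'}     # w

for header in ['Filesystem', 'Use%', 'MAJ:MIN', 'SIZE/OFF', 'LOGIN@']:
    key = header.lower()
    print(header, '->', renames.get(key, key))   # e.g. Use% -> use_percent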
@@ -50,11 +50,11 @@ def parse(data):
     # https://gist.github.com/cahna/43a1a3ff4d075bcd71f9d7120037a501
 
     cleandata = data.splitlines()
-    headers = [h for h in ' '.join(cleandata[0].strip().split()).split() if h]
+    headers = [h for h in ' '.join(cleandata[0].lower().strip().split()).split() if h]
 
     # clean up 'Use%' header
     # even though % in a key is valid json, it can make things difficult
-    headers = ['Use_percent' if x == 'Use%' else x for x in headers]
+    headers = ['use_percent' if x == 'use%' else x for x in headers]
 
     raw_data = map(lambda s: s.strip().split(None, len(headers) - 1), cleandata[1:])
     return [dict(zip(headers, r)) for r in raw_data]
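The split/lower/zip pattern in the df hunk above recurs in most of the hunks below. A minimal self-contained sketch of it (the sample df text and values are invented for illustration):

# Lowercase the header row, rename the awkward 'use%' header, then zip
# each whitespace-split data row against the headers.
sample = '''Filesystem     1K-blocks    Used Available Use% Mounted on
udev             1918816       0   1918816   0% /dev
/dev/sda2       20508240 3972734  15470606  21% /'''

cleandata = sample.splitlines()
headers = [h for h in ' '.join(cleandata[0].lower().strip().split()).split() if h]
headers = ['use_percent' if x == 'use%' else x for x in headers]

raw_data = map(lambda s: s.strip().split(None, len(headers) - 1), cleandata[1:])
print([dict(zip(headers, r)) for r in raw_data])
# e.g. [{'filesystem': 'udev', '1k-blocks': '1918816', ..., 'use_percent': '0%', 'mounted': '/dev'}, ...]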
@@ -32,7 +32,7 @@ def parse(data):
     # https://gist.github.com/cahna/43a1a3ff4d075bcd71f9d7120037a501
 
     cleandata = data.splitlines()
-    headers = [h for h in ' '.join(cleandata[0].strip().split()).split() if h]
+    headers = [h for h in ' '.join(cleandata[0].lower().strip().split()).split() if h]
     headers.insert(0, "type")
 
     # clean up 'buff/cache' header
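free's header row has no label above the first column (the 'Mem:'/'Swap:' row names), which is presumably why a 'type' header is prepended in the hunk above. A standalone sketch with abbreviated, made-up free output:

sample = '''              total        used        free
Mem:        3976548     1357412      509060
Swap:       2097148           0     2097148'''

cleandata = sample.splitlines()
headers = [h for h in ' '.join(cleandata[0].lower().strip().split()).split() if h]
headers.insert(0, 'type')   # name the unlabeled first column

raw_data = map(lambda s: s.strip().split(None, len(headers) - 1), cleandata[1:])
print([dict(zip(headers, r)) for r in raw_data])
# [{'type': 'Mem:', 'total': '3976548', 'used': '1357412', 'free': '509060'}, {'type': 'Swap:', ...}]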
@@ -59,16 +59,16 @@ def parse(data):
     # https://gist.github.com/cahna/43a1a3ff4d075bcd71f9d7120037a501
 
     cleandata = data.splitlines()
-    headers = [h for h in ' '.join(cleandata[0].strip().split()).split() if h]
+    headers = [h for h in ' '.join(cleandata[0].lower().strip().split()).split() if h]
 
-    # clean up 'MAJ:MIN' header
+    # clean up 'maj:min' header
     # even though colon in a key is valid json, it can make things difficult
-    headers = ['MAJ_MIN' if x == 'MAJ:MIN' else x for x in headers]
+    headers = ['maj_min' if x == 'maj:min' else x for x in headers]
 
     raw_data = map(lambda s: s.strip().split(None, len(headers) - 1), cleandata[1:])
     output = [dict(zip(headers, r)) for r in raw_data]
 
     for entry in output:
-        entry['NAME'] = entry['NAME'].encode('ascii', errors='ignore').decode()
+        entry['name'] = entry['name'].encode('ascii', errors='ignore').decode()
 
     return output
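The encode/decode round-trip in the lsblk hunk silently drops anything that is not ASCII, which strips the tree-drawing characters lsblk prints in the NAME column. A one-line illustration (the sample string is hypothetical):

# errors='ignore' discards characters that cannot be encoded as ASCII.
name = '└─sda1'
print(name.encode('ascii', errors='ignore').decode())   # sda1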
@@ -65,16 +65,13 @@ def parse(data):
     # https://gist.github.com/cahna/43a1a3ff4d075bcd71f9d7120037a501
 
     cleandata = data.splitlines()
-    headers = [h for h in ' '.join(cleandata[0].strip().split()).split() if h]
-
-    headers.pop(-1)
-    headers.append('By')
+    headers = [h for h in ' '.join(cleandata[0].lower().strip().split()).split() if h]
 
     raw_data = map(lambda s: s.strip().split(None, len(headers) - 1), cleandata[1:])
     output = [dict(zip(headers, r)) for r in raw_data]
 
     for mod in output:
-        if 'By' in mod:
-            mod['By'] = mod['By'].split(',')
+        if 'by' in mod:
+            mod['by'] = mod['by'].split(',')
 
     return output
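With lowercased headers, lsmod's 'Used by' column lands under the key 'by' (so the old pop/append rename to 'By' is no longer needed), and its comma-separated module list is split into a JSON array. A tiny standalone example with invented values:

mod = {'module': 'snd', 'size': '81920', 'used': '3',
       'by': 'snd_timer,snd_pcm,snd_hda_codec'}
if 'by' in mod:
    mod['by'] = mod['by'].split(',')
print(mod['by'])   # ['snd_timer', 'snd_pcm', 'snd_hda_codec']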
@@ -85,11 +85,11 @@ def parse(data):
 
     # find column value of last character of each header
     header_row = cleandata.pop(0)
-    headers = header_row.split()
+    headers = header_row.lower().split()
 
-    # clean up 'SIZE/OFF' header
+    # clean up 'size/off' header
     # even though forward slash in a key is valid json, it can make things difficult
-    headers = ['SIZE_OFF' if x == 'SIZE/OFF' else x for x in headers]
+    headers = ['size_off' if x == 'size/off' else x for x in headers]
 
     header_spec = []
     for i, h in enumerate(headers):
@@ -109,7 +109,7 @@ def parse(data):
         header_name = spec[1]
         col = spec[2] - 1
 
-        if header_name == 'COMMAND' or header_name == 'NAME':
+        if header_name == 'command' or header_name == 'name':
            continue
         if entry[col] == string.whitespace:
            temp_line.insert(index, None)
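lsof is parsed by column position rather than by simple whitespace splitting; the comment in the hunk above refers to recording where each header ends in the header row. A rough standalone sketch of that idea (the (index, name, end_column) layout of header_spec is a guess based only on the lines visible in this diff, and the sample header row is invented):

header_row = 'COMMAND   PID  USER   FD   TYPE DEVICE SIZE/OFF  NODE NAME'.lower()
headers = header_row.split()

header_spec = []
for i, h in enumerate(headers):
    # naive lookup: column just past the last character of this header
    end_col = header_row.find(h) + len(h)
    name = 'size_off' if h == 'size/off' else h
    header_spec.append((i, name, end_col))

for spec in header_spec:
    header_name = spec[1]
    col = spec[2] - 1
    print(header_name, col)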
@@ -62,6 +62,6 @@ def parse(data):
     # https://gist.github.com/cahna/43a1a3ff4d075bcd71f9d7120037a501
 
     cleandata = data.splitlines()
-    headers = [h for h in ' '.join(cleandata[0].strip().split()).split() if h]
+    headers = [h for h in ' '.join(cleandata[0].lower().strip().split()).split() if h]
     raw_data = map(lambda s: s.strip().split(None, len(headers) - 1), cleandata[1:])
     return [dict(zip(headers, r)) for r in raw_data]
@@ -58,6 +58,6 @@ def parse(data):
     # https://gist.github.com/cahna/43a1a3ff4d075bcd71f9d7120037a501
 
     cleandata = data.splitlines()[1:]
-    headers = [h for h in ' '.join(cleandata[0].strip().split()).split() if h]
+    headers = [h for h in ' '.join(cleandata[0].lower().strip().split()).split() if h]
     raw_data = map(lambda s: s.strip().split(None, len(headers) - 1), cleandata[1:])
     return [dict(zip(headers, r)) for r in raw_data]
@@ -37,11 +37,11 @@ def parse(data):
     # https://gist.github.com/cahna/43a1a3ff4d075bcd71f9d7120037a501
 
     cleandata = data.splitlines()[1:]
-    headers = [h for h in ' '.join(cleandata[0].strip().split()).split() if h]
+    headers = [h for h in ' '.join(cleandata[0].lower().strip().split()).split() if h]
 
-    # clean up 'LOGIN@' header
+    # clean up 'login@' header
     # even though @ in a key is valid json, it can make things difficult
-    headers = ['LOGIN_AT' if x == 'LOGIN@' else x for x in headers]
+    headers = ['login_at' if x == 'login@' else x for x in headers]
 
     raw_data = map(lambda s: s.strip().split(None, len(headers) - 1), cleandata[1:])
     return [dict(zip(headers, r)) for r in raw_data]
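For the w parser, the standard header row then lowercases to the final key names directly (a standalone check; the header row text comes from typical w output, not from this diff):

header_row = 'USER     TTY      FROM             LOGIN@   IDLE   JCPU PCPU WHAT'
headers = [h for h in ' '.join(header_row.lower().strip().split()).split() if h]
headers = ['login_at' if x == 'login@' else x for x in headers]
print(headers)   # ['user', 'tty', 'from', 'login_at', 'idle', 'jcpu', 'pcpu', 'what']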