mirror of
https://github.com/kellyjonbrazil/jc.git
synced 2026-04-03 17:44:07 +02:00
Compare commits
522 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
032cda8b3d | ||
|
|
6badd3fb1e | ||
|
|
724d825745 | ||
|
|
ff1e32ad2e | ||
|
|
a5f97febd3 | ||
|
|
5baa6cc865 | ||
|
|
7a4f30b843 | ||
|
|
b2c385dc4f | ||
|
|
5d5da8d33f | ||
|
|
e604571578 | ||
|
|
f9dacc3f95 | ||
|
|
6086920332 | ||
|
|
f52f3163bc | ||
|
|
d18ff73e88 | ||
|
|
1e5d602cae | ||
|
|
12912521ec | ||
|
|
842ea3a94b | ||
|
|
a8560dbc15 | ||
|
|
a65e27540a | ||
|
|
c3c5ed11e6 | ||
|
|
ce24149335 | ||
|
|
0314ca8c48 | ||
|
|
ebd8ee49a9 | ||
|
|
38d10c9781 | ||
|
|
360106c24d | ||
|
|
ca470a5d02 | ||
|
|
57f66e6b1d | ||
|
|
e774f67924 | ||
|
|
ac10e576c1 | ||
|
|
bcae0a99cd | ||
|
|
c73c2ff879 | ||
|
|
c39b1a3356 | ||
|
|
125dc2d9e0 | ||
|
|
b7d4ddc7ce | ||
|
|
f5e546c6fa | ||
|
|
928e39cd10 | ||
|
|
d0b7ea68a0 | ||
|
|
8444690133 | ||
|
|
c03c42d767 | ||
|
|
ab67688a00 | ||
|
|
5dcb7166da | ||
|
|
14697b86d7 | ||
|
|
4f4b6276d4 | ||
|
|
7bc497e129 | ||
|
|
68a37a6a5a | ||
|
|
6f5cd1d7c5 | ||
|
|
126b1b121c | ||
|
|
2341e456a0 | ||
|
|
72d80e95bb | ||
|
|
f5ec82440c | ||
|
|
c8e526ead3 | ||
|
|
066adfb764 | ||
|
|
5b444d4717 | ||
|
|
69c95adc8d | ||
|
|
2b0e0d8f5c | ||
|
|
778d1bacbf | ||
|
|
7e1b041016 | ||
|
|
313b9b329c | ||
|
|
6830062256 | ||
|
|
323072c982 | ||
|
|
8719d96bdd | ||
|
|
dd5d318ab5 | ||
|
|
d6dc7f5e65 | ||
|
|
c203664eb5 | ||
|
|
19ecf1fa19 | ||
|
|
b8deb0426c | ||
|
|
3b8371f020 | ||
|
|
20bb1cdf39 | ||
|
|
301daa48d0 | ||
|
|
8421ec8803 | ||
|
|
74211eb012 | ||
|
|
60bd42f298 | ||
|
|
14bdd74526 | ||
|
|
fb0f3eda04 | ||
|
|
91ee6e6701 | ||
|
|
51f4e6927c | ||
|
|
94988d8667 | ||
|
|
fe36f5a98c | ||
|
|
f9eb18b927 | ||
|
|
cc60f36748 | ||
|
|
604ade791f | ||
|
|
690ac52a91 | ||
|
|
34ed772775 | ||
|
|
d5ab95571f | ||
|
|
ffb3a0ee5f | ||
|
|
94b12b57aa | ||
|
|
6d149e8457 | ||
|
|
1ad89c90d8 | ||
|
|
fb71c7b020 | ||
|
|
28ed17ad3b | ||
|
|
0c2a4e2bf7 | ||
|
|
62bec30de2 | ||
|
|
3fced77e4e | ||
|
|
a09d1d8b76 | ||
|
|
8f4243fbd8 | ||
|
|
47aaf20549 | ||
|
|
0c5289ea50 | ||
|
|
3e53323514 | ||
|
|
a5ee9861b9 | ||
|
|
feb8ca7654 | ||
|
|
a7abe4473b | ||
|
|
780b9b61de | ||
|
|
19ace36ffa | ||
|
|
5fff8afc9f | ||
|
|
4ad230c927 | ||
|
|
dd98eb1ec8 | ||
|
|
c6baf42e72 | ||
|
|
e2bac97d56 | ||
|
|
d112ee94d0 | ||
|
|
27b21b2faf | ||
|
|
8c96d5cd20 | ||
|
|
c29ed3fd69 | ||
|
|
cedf603f12 | ||
|
|
279161c36f | ||
|
|
ce0b43d919 | ||
|
|
ddafa5bf06 | ||
|
|
bc7116c31b | ||
|
|
53b7092721 | ||
|
|
beb9174b1b | ||
|
|
aea41ed341 | ||
|
|
d789494cb1 | ||
|
|
608e7b4cff | ||
|
|
4ee199c02a | ||
|
|
fbf47d4085 | ||
|
|
5a238e4b42 | ||
|
|
f852b8246a | ||
|
|
88140d929a | ||
|
|
45f7268240 | ||
|
|
3a3c8e4d4a | ||
|
|
c1ac183a04 | ||
|
|
18bb779ee5 | ||
|
|
8b6612fe79 | ||
|
|
fde0bc8534 | ||
|
|
e661a78939 | ||
|
|
847e346602 | ||
|
|
b969751688 | ||
|
|
ad6f2ba03a | ||
|
|
63c6a5edc0 | ||
|
|
9f4cf9dd5e | ||
|
|
51331b6dc0 | ||
|
|
efb6761033 | ||
|
|
6a4f737a0f | ||
|
|
be6864b778 | ||
|
|
de3b91a36c | ||
|
|
ef5482c3b5 | ||
|
|
d20b795137 | ||
|
|
8a134065df | ||
|
|
22aee1bfa4 | ||
|
|
b282820fd6 | ||
|
|
3ee098306d | ||
|
|
09e8f379a6 | ||
|
|
69018cdb3a | ||
|
|
d0d7254c6a | ||
|
|
cc0f0971d7 | ||
|
|
2af61730f0 | ||
|
|
83f41b83dc | ||
|
|
1fb84fce88 | ||
|
|
a8837e1244 | ||
|
|
04d2eec558 | ||
|
|
1b57ec92f0 | ||
|
|
4d88595404 | ||
|
|
52b1272a3a | ||
|
|
d2ccad6a83 | ||
|
|
cad6dde4ac | ||
|
|
06811c3539 | ||
|
|
0cb23c2b21 | ||
|
|
ac4688dca2 | ||
|
|
326c3b4670 | ||
|
|
9b29d0c268 | ||
|
|
e0013c3871 | ||
|
|
a75744075b | ||
|
|
525aec1a02 | ||
|
|
0bf9a7a072 | ||
|
|
d8f2f4c95b | ||
|
|
35d733b44f | ||
|
|
9179b4175c | ||
|
|
bb07d78c78 | ||
|
|
07b179cd7f | ||
|
|
054422d837 | ||
|
|
3e052d1810 | ||
|
|
c8e72805cf | ||
|
|
12a80e7db0 | ||
|
|
ee7ff9a09d | ||
|
|
f6478fb636 | ||
|
|
811a0b0495 | ||
|
|
aeb48edf72 | ||
|
|
b1e94f0df7 | ||
|
|
60050e3c0f | ||
|
|
39ef09aa5b | ||
|
|
8377d43116 | ||
|
|
54e4c447ab | ||
|
|
937a9fa9cf | ||
|
|
808ff6cf0e | ||
|
|
7f5c649a95 | ||
|
|
b72727dec9 | ||
|
|
3fc88bfb33 | ||
|
|
9f2279d586 | ||
|
|
346a14cb9b | ||
|
|
dac00d17ff | ||
|
|
9ca7cd4060 | ||
|
|
aa31628970 | ||
|
|
bed694fcf5 | ||
|
|
4b4af69fa1 | ||
|
|
9d96190a5b | ||
|
|
fa44d48c09 | ||
|
|
4ef961c278 | ||
|
|
292a837d5c | ||
|
|
aa7b915d84 | ||
|
|
c46fe73236 | ||
|
|
039b2c129c | ||
|
|
8f2e5e4808 | ||
|
|
c4da8e4f78 | ||
|
|
bcab9078a4 | ||
|
|
b3c6c1ea92 | ||
|
|
a3af8662bd | ||
|
|
35940d0bc8 | ||
|
|
26994cdcb7 | ||
|
|
017159a829 | ||
|
|
b4e9c85e08 | ||
|
|
189146cd84 | ||
|
|
af34153ffa | ||
|
|
bf2ff3ffbb | ||
|
|
6423c9efd6 | ||
|
|
58ab0d4ece | ||
|
|
83a738bf4d | ||
|
|
3640671fc6 | ||
|
|
1da623b30e | ||
|
|
b10ca64646 | ||
|
|
2128763ee6 | ||
|
|
a27e7ed39c | ||
|
|
f07b7eaa47 | ||
|
|
6ce18de84c | ||
|
|
8631b756e7 | ||
|
|
7414d98412 | ||
|
|
d7b19892e8 | ||
|
|
96df396eaf | ||
|
|
2f6f640317 | ||
|
|
c4a0a50f3a | ||
|
|
658f8a3842 | ||
|
|
bfb876a1e3 | ||
|
|
90c34b1f4e | ||
|
|
3f9164ea77 | ||
|
|
7fd6fecbf5 | ||
|
|
8029f72363 | ||
|
|
c7fdce5d3b | ||
|
|
84f48aa369 | ||
|
|
2e9a0a9c12 | ||
|
|
c1f6f2b950 | ||
|
|
ede21bca13 | ||
|
|
8dd9a9f9cb | ||
|
|
04f92cd133 | ||
|
|
8be8d2393b | ||
|
|
0a879681be | ||
|
|
2ca1587a49 | ||
|
|
ec2cd2d708 | ||
|
|
5d0dbece93 | ||
|
|
df1e4b414b | ||
|
|
40760991e7 | ||
|
|
464f5f86cf | ||
|
|
7b09e9fccd | ||
|
|
6cba7d4298 | ||
|
|
9730f62e49 | ||
|
|
e0c1c87f54 | ||
|
|
931b3d2b83 | ||
|
|
e5d561baee | ||
|
|
2867593e7a | ||
|
|
dd52fee563 | ||
|
|
8e1f885827 | ||
|
|
2d39a58f90 | ||
|
|
9c4fa2ae26 | ||
|
|
de52d84e82 | ||
|
|
ce9b55059a | ||
|
|
bcd370a6a0 | ||
|
|
c8216850ab | ||
|
|
f5feedb90b | ||
|
|
a4371cd187 | ||
|
|
9d5ba4c834 | ||
|
|
1639dee1bb | ||
|
|
9363f430f2 | ||
|
|
9192a09073 | ||
|
|
b915eb9755 | ||
|
|
1cfcc2b592 | ||
|
|
7138dd02b7 | ||
|
|
b4276643b7 | ||
|
|
2ef00763bf | ||
|
|
54364928fc | ||
|
|
09b3b4932b | ||
|
|
29d6670119 | ||
|
|
2f654b5f1a | ||
|
|
e53b9f5992 | ||
|
|
addb234e61 | ||
|
|
76eca3b659 | ||
|
|
f90dec4c0e | ||
|
|
8900a59d4c | ||
|
|
6685138200 | ||
|
|
4d3e65b980 | ||
|
|
e9282bb546 | ||
|
|
f5627a4594 | ||
|
|
81ffdb2510 | ||
|
|
4c00a99850 | ||
|
|
2bfcb45b28 | ||
|
|
ab0c10e791 | ||
|
|
2c1935115d | ||
|
|
d98e43dc78 | ||
|
|
9348988d64 | ||
|
|
1285c66467 | ||
|
|
b7191bbc13 | ||
|
|
98b97509f7 | ||
|
|
2b2b570490 | ||
|
|
cce2d1ff29 | ||
|
|
b79600c572 | ||
|
|
140f1a8543 | ||
|
|
e34657cfde | ||
|
|
99070fa607 | ||
|
|
2b46785b1f | ||
|
|
c72562524b | ||
|
|
b7dd6441c7 | ||
|
|
31fcc2f755 | ||
|
|
b391aa14bc | ||
|
|
d3c45debbb | ||
|
|
5b08469b87 | ||
|
|
4a77ec63a4 | ||
|
|
d13606b6dc | ||
|
|
05291c93bb | ||
|
|
8cf00a208e | ||
|
|
06d73c8876 | ||
|
|
649c646ea2 | ||
|
|
b7756d9250 | ||
|
|
1cd2cd954c | ||
|
|
72020b8da9 | ||
|
|
cf9720b749 | ||
|
|
967b9db7f9 | ||
|
|
bb3acb1182 | ||
|
|
560c7f7e6d | ||
|
|
79b2841764 | ||
|
|
a06a89cbd1 | ||
|
|
431bd969eb | ||
|
|
c87b722aec | ||
|
|
3688b8b014 | ||
|
|
07b8d9e0c0 | ||
|
|
7454b53e39 | ||
|
|
3d6a76024d | ||
|
|
421b980957 | ||
|
|
4a22e27d6a | ||
|
|
99f7842dee | ||
|
|
7f869b4b18 | ||
|
|
9665f4ee84 | ||
|
|
606904d48b | ||
|
|
3f5279b97c | ||
|
|
f5ec21e6ac | ||
|
|
578a284465 | ||
|
|
422e392d9d | ||
|
|
54dfffd34a | ||
|
|
cffba64d2b | ||
|
|
56a0c12a59 | ||
|
|
c174d3de18 | ||
|
|
a9c59ef9fc | ||
|
|
abdb9b2673 | ||
|
|
548aaab626 | ||
|
|
20571c87ae | ||
|
|
19e49200de | ||
|
|
d32f5c67a9 | ||
|
|
b83b626435 | ||
|
|
ab2c1b25ec | ||
|
|
f2d46313a4 | ||
|
|
87e4796a6c | ||
|
|
0014a5c2f4 | ||
|
|
7af56e0dad | ||
|
|
a5ae6e3c01 | ||
|
|
fe1a0d1faf | ||
|
|
302f05cdda | ||
|
|
c0044be7b0 | ||
|
|
0110078807 | ||
|
|
42eacb45f8 | ||
|
|
a43e2e1991 | ||
|
|
c8b721d4f6 | ||
|
|
d0bfddc3d9 | ||
|
|
6b925a16c8 | ||
|
|
89ebd9fc22 | ||
|
|
6b4ba66231 | ||
|
|
5b697dc381 | ||
|
|
9ba73c95d1 | ||
|
|
93aa390447 | ||
|
|
3cfb8945dd | ||
|
|
cd8d38f2a1 | ||
|
|
8ec8cd6294 | ||
|
|
c028113561 | ||
|
|
5f22e1c803 | ||
|
|
d3351787e5 | ||
|
|
e5bea9ae3b | ||
|
|
93c710abe9 | ||
|
|
c29e7cfe5c | ||
|
|
cb5c1ba00d | ||
|
|
9a012b94e1 | ||
|
|
400f5a44ec | ||
|
|
a2ab5bab91 | ||
|
|
fc8ab27361 | ||
|
|
59f19d33a5 | ||
|
|
dfc9618115 | ||
|
|
8e02e5c75a | ||
|
|
970493ab93 | ||
|
|
64d78956eb | ||
|
|
40c05346f4 | ||
|
|
e9b0bc1409 | ||
|
|
798e6bb7d9 | ||
|
|
12a370deed | ||
|
|
553bfbe1a0 | ||
|
|
52494321fc | ||
|
|
c6c9e06496 | ||
|
|
e3a6c05a58 | ||
|
|
391d06f68d | ||
|
|
99804ea06e | ||
|
|
51935deb2a | ||
|
|
b24d0c3a47 | ||
|
|
762a886d6f | ||
|
|
2c3e9ddfe4 | ||
|
|
c7cd2b63c8 | ||
|
|
f0528ea831 | ||
|
|
5bc5596f60 | ||
|
|
2c27ac46be | ||
|
|
caad840153 | ||
|
|
65bd7e2904 | ||
|
|
c3d7d7db12 | ||
|
|
5605310362 | ||
|
|
17b6efe82e | ||
|
|
a032ae56ae | ||
|
|
eab2f4b056 | ||
|
|
aff86ae6c7 | ||
|
|
7ece9ddc1a | ||
|
|
7cd048e839 | ||
|
|
1e22f610a3 | ||
|
|
5249c972ae | ||
|
|
fd45f856a0 | ||
|
|
c8ab40cd33 | ||
|
|
b2c872925b | ||
|
|
f48e229202 | ||
|
|
799fec92c3 | ||
|
|
87a41c2fca | ||
|
|
7f85de0c46 | ||
|
|
13661b1993 | ||
|
|
51d5c3892d | ||
|
|
e4eab4641a | ||
|
|
9b148e0ba3 | ||
|
|
de28932650 | ||
|
|
5f798d603e | ||
|
|
a0757b2dd3 | ||
|
|
498d51b4e8 | ||
|
|
b06b6bae3f | ||
|
|
b5eaff2137 | ||
|
|
c01bcd3734 | ||
|
|
d75c4068ca | ||
|
|
6aa2d5a3d2 | ||
|
|
065276805f | ||
|
|
a63408c8cf | ||
|
|
69576f6bfa | ||
|
|
19845624e2 | ||
|
|
22ff2964e9 | ||
|
|
d96b3a65a9 | ||
|
|
4989445ef4 | ||
|
|
6770892acd | ||
|
|
d4eba8740f | ||
|
|
9f60760560 | ||
|
|
0a8f8ac934 | ||
|
|
6ae24c8244 | ||
|
|
d3679082a8 | ||
|
|
fb08b42dca | ||
|
|
4aeaa9f42a | ||
|
|
5f5693da04 | ||
|
|
5eb0f61727 | ||
|
|
958e998991 | ||
|
|
b78c1509f6 | ||
|
|
ce184d4d57 | ||
|
|
b4c3714ced | ||
|
|
5b7dfa0438 | ||
|
|
391a388476 | ||
|
|
d9c4e2ed4c | ||
|
|
0c42db38b1 | ||
|
|
2f9be8bf33 | ||
|
|
e8c00155e8 | ||
|
|
cc88fdd9ee | ||
|
|
d9de11ef1d | ||
|
|
0ceda97d09 | ||
|
|
d0dec92ba8 | ||
|
|
d420c008d8 | ||
|
|
f0b32db433 | ||
|
|
bc838eda59 | ||
|
|
afe55b6af0 | ||
|
|
dd3a3ac302 | ||
|
|
f9982a7947 | ||
|
|
07c1be9e9a | ||
|
|
f832b88755 | ||
|
|
0fac757efc | ||
|
|
fc15742065 | ||
|
|
6f2466a131 | ||
|
|
4b90e22f0a | ||
|
|
c493568785 | ||
|
|
1cdf004b77 | ||
|
|
a4ea504261 | ||
|
|
4c2c234c3b | ||
|
|
3d4c0f3e89 | ||
|
|
52fad02903 | ||
|
|
9dcabc057c | ||
|
|
db8c1079dd | ||
|
|
8f954673ab | ||
|
|
79522d1c7d | ||
|
|
a18bf03079 | ||
|
|
c02b6b5d82 | ||
|
|
f99b423284 | ||
|
|
d7d9d45d4f | ||
|
|
90065ec0cd | ||
|
|
51157ebb86 | ||
|
|
96d95c79ca | ||
|
|
e5da34c233 | ||
|
|
f09d657f77 | ||
|
|
0f4b0189f5 | ||
|
|
4666042abb | ||
|
|
027d544c2b | ||
|
|
f1967d0138 | ||
|
|
c1d896027d | ||
|
|
5c2d2a6618 | ||
|
|
997b269b0b | ||
|
|
61257e7525 |
31
.github/workflows/pythonapp.yml
vendored
Normal file
31
.github/workflows/pythonapp.yml
vendored
Normal file
@@ -0,0 +1,31 @@
|
||||
name: Tests
|
||||
|
||||
on:
|
||||
push:
|
||||
paths:
|
||||
- "**/*.py"
|
||||
pull_request:
|
||||
paths:
|
||||
- "**/*.py"
|
||||
|
||||
jobs:
|
||||
build:
|
||||
runs-on: ${{ matrix.os }}
|
||||
strategy:
|
||||
matrix:
|
||||
os: [macos-latest, ubuntu-latest, windows-latest]
|
||||
python-version: [3.6, 3.7, 3.8]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v2
|
||||
- name: Set up Python ${{ matrix.python-version }}
|
||||
uses: actions/setup-python@v1
|
||||
with:
|
||||
python-version: ${{ matrix.python-version }}
|
||||
- name: Install dependencies
|
||||
run: |
|
||||
python -m pip install --upgrade pip
|
||||
pip install -r requirements.txt
|
||||
- name: Test with unittest
|
||||
run: |
|
||||
python -m unittest discover tests
|
||||
2
.gitignore
vendored
2
.gitignore
vendored
@@ -3,4 +3,4 @@ __pycache__
|
||||
dist/
|
||||
build/
|
||||
*.egg-info/
|
||||
jc/parsers.old/
|
||||
.github/
|
||||
|
||||
2
LICENSE.md
Executable file → Normal file
2
LICENSE.md
Executable file → Normal file
@@ -1,6 +1,6 @@
|
||||
MIT License
|
||||
|
||||
Copyright (c) 2019 Kelly Brazil
|
||||
Copyright (c) 2020 Kelly Brazil
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
|
||||
@@ -1 +0,0 @@
|
||||
graft tests/fixtures
|
||||
160
changelog.txt
160
changelog.txt
@@ -1,5 +1,165 @@
|
||||
jc changelog
|
||||
|
||||
20200727 v1.13.0
|
||||
- Add ping and ping6 command parser tested on linux, macos, and freebsd
|
||||
- Add traceroute and traceroute6 command parser tested on linux, macos, and freebsd
|
||||
- Add tracepath command parser tested on linux
|
||||
- Update ini parser to support files only containing key/value pairs
|
||||
- Update uname parser exception with a hint to use "uname -a"
|
||||
- Update route parser to support IPv6 tables
|
||||
|
||||
20200711 v1.12.1
|
||||
- Fix tests when using older version of pygments library
|
||||
|
||||
20200710 v1.12.0
|
||||
- Add sysctl command parser tested on linux, macOS, and freebsd
|
||||
- Update the cli code to allow older versions of the pygments library (2.3.0) for debian packaging
|
||||
- Code cleanup on the cli
|
||||
- Add tests for the cli
|
||||
- Vendorize cgitb as tracebackplus for verbose debug messages
|
||||
|
||||
20200625 v1.11.8
|
||||
- Add verbose debug option using -dd argument
|
||||
|
||||
20200622 v1.11.7
|
||||
- Fix iptables parser issue which would not output the last chain
|
||||
|
||||
20200614 v1.11.6
|
||||
- Improve and standardize empty data check for all parsers
|
||||
|
||||
20200612 v1.11.5
|
||||
- Update airport_s parser to fix error on parsing empty data
|
||||
- Update arp parser to fix error on parsing empty data
|
||||
- Update blkid parser to fix error on parsing empty data
|
||||
- Update crontab parser to fix error on parsing empty data
|
||||
- Update crontab_u parser to fix error on parsing empty data
|
||||
- Update df parser to fix error on parsing empty data
|
||||
- Update free parser to fix error on parsing empty data
|
||||
- Update lsblk parser to fix error on parsing empty data
|
||||
- Update lsmod parser to fix error on parsing empty data
|
||||
- Update mount parser to fix error on parsing empty data
|
||||
- Update netstat parser to fix error on parsing empty data
|
||||
- Update ntpq parser to fix error on parsing empty data
|
||||
- Update ps parser to fix error on parsing empty data
|
||||
- Update route parser to fix error on parsing empty data
|
||||
- Update systemctl parser to fix error on parsing empty data
|
||||
- Update systemctl_lj parser to fix error on parsing empty data
|
||||
- Update systemctl_ls parser to fix error on parsing empty data
|
||||
- Update systemctl_luf parser to fix error on parsing empty data
|
||||
- Update uptime parser to fix error on parsing empty data
|
||||
- Update w parser to fix error on parsing empty data
|
||||
- Update xml parser to fix error on parsing empty data
|
||||
- Add tests to all parsers for no data condition
|
||||
- Update ss parser to fix integer fields
|
||||
|
||||
20200610 v1.11.4
|
||||
- Update ls parser to fix error on parsing an empty directory
|
||||
|
||||
20200609 v1.11.3
|
||||
- Add local parser plugin feature (contributed by Dean Serenevy)
|
||||
|
||||
20200530 v1.11.2
|
||||
- Update netstat parser to add freebsd support
|
||||
- Update netstat parser to add route_flags_pretty field
|
||||
- Update netstat parser to change osx_inode field name to unix_inode
|
||||
- Update netstat parser to change osx_flags field name to unix_flags
|
||||
- Update netstat parser to strip whitespace from state field
|
||||
- Update route parser to add flags_pretty field
|
||||
- Update arp parser to add permanent field (freebsd and osx)
|
||||
- Update arp parser to add expires field (freebsd)
|
||||
- Update w parser to strip whitespace from what field
|
||||
- Update last parser to fix FreeBSD issues
|
||||
- Update stat parser to change osx_flags field name to unix_flags
|
||||
- Update stat parser to add unix_device field for freebsd and osx
|
||||
- Fix freebsd compatibility message for df, fstab, mount, ntpq, stat, and uname parsers
|
||||
- Fix compatibility message for platforms that include the version number at the end (e.g. freebsd12)
|
||||
|
||||
20200523 v1.11.1
|
||||
- Update stat command parser to change osx_flags field to string
|
||||
|
||||
20200522 v1.11.0
|
||||
- Add dmidecode command parser
|
||||
- Update stat command parser to add OSX support
|
||||
- Update netstat command parser to add OSX support
|
||||
- Update netstat command parser to add -r (route) functionality for linux and OSX
|
||||
- Update netstat command parser to add -i (interface) functionality for linux and OSX
|
||||
|
||||
20200511 v1.10.12
|
||||
- Remove shebang from jc/cli.py for Fedora packaging
|
||||
|
||||
20200511 v1.10.11
|
||||
- Change file permissions for Fedora packaging
|
||||
|
||||
20200509 v1.10.10
|
||||
- Fix ls parser issue where the first file was skipped for ls -R on some platforms
|
||||
- Update last parser to handle 'gone - no logout' condition
|
||||
- Update netstat parser to handle bluetooth section (ignore gracefully for now)
|
||||
|
||||
20200508 v1.10.9
|
||||
- Add license info to vendorized ifconfig-parser class
|
||||
|
||||
20200508 v1.10.8
|
||||
- Add license file to dist for Fedora RPM packaging requirements
|
||||
- Remove tests from package to keep from polluting the global site-packages
|
||||
|
||||
20200501 v1.10.7
|
||||
- Requirements modifications for Fedora RPM packaging requirements
|
||||
|
||||
20200420 v1.10.6
|
||||
- Remove homebrew shim references from du osx tests
|
||||
|
||||
20200414 v1.10.5
|
||||
- Minor change of using sys.exit(0) instead of exit()
|
||||
|
||||
20200412 v1.10.4
|
||||
- Add color customization via JC_COLORS env variable
|
||||
|
||||
20200409 v1.10.3
|
||||
- Fix break on pipe error
|
||||
|
||||
20200409 v1.10.2
|
||||
- Change colors to ansi and match jello colors
|
||||
|
||||
20200402 v1.10.1
|
||||
- Code cleanup
|
||||
|
||||
20200402 v1.10.0
|
||||
- Add color output by default when not piping data to another program
|
||||
- Add -m option for monochrome output
|
||||
|
||||
20200326 v1.9.3
|
||||
- Add axfr support for dig command parser
|
||||
|
||||
20200312 v1.9.2
|
||||
- Updated arp parser to fix OSX detection for some edge cases
|
||||
|
||||
20200312 v1.9.1
|
||||
- Updated file command parser to make filename splitting more robust
|
||||
|
||||
20200311 v1.9.0
|
||||
- Added ntpq command parser
|
||||
- Added timedatectl status command parser
|
||||
- Added airport -I and airport -s command parser
|
||||
- Added file command parser
|
||||
- Optimized history command parser by https://github.com/philippeitis
|
||||
- Magic syntax fix for certain edge cases
|
||||
|
||||
20200308 v1.8.1
|
||||
- CLI optimizations by https://github.com/philippeitis
|
||||
- Refactored magic syntax function and added tests (https://github.com/philippeitis)
|
||||
- Github actions for CI testing on multiple platforms by https://github.com/philippeitis
|
||||
- Updated ls parser to fix parsing error in OSX with -lR when there are empty folders
|
||||
|
||||
20200303 v1.8.0
|
||||
- Added blkid command parser
|
||||
- Added last and lastb command parser
|
||||
- Added who command parser
|
||||
- Added CSV file parser
|
||||
- Added /etc/passwd file parser
|
||||
- Added /etc/shadow file parser
|
||||
- Added /etc/group file parser
|
||||
- Added /etc/gshadow file parser
|
||||
|
||||
20200227 v1.7.5
|
||||
- Updated ls parser to support filenames with newline characters
|
||||
|
||||
|
||||
18
docgen.sh
18
docgen.sh
@@ -4,15 +4,23 @@
|
||||
cd jc
|
||||
pydocmd simple jc+ > ../docs/readme.md
|
||||
pydocmd simple utils+ > ../docs/utils.md
|
||||
pydocmd simple jc.parsers.airport+ > ../docs/parsers/airport.md
|
||||
pydocmd simple jc.parsers.airport_s+ > ../docs/parsers/airport_s.md
|
||||
pydocmd simple jc.parsers.arp+ > ../docs/parsers/arp.md
|
||||
pydocmd simple jc.parsers.blkid+ > ../docs/parsers/blkid.md
|
||||
pydocmd simple jc.parsers.crontab+ > ../docs/parsers/crontab.md
|
||||
pydocmd simple jc.parsers.crontab_u+ > ../docs/parsers/crontab_u.md
|
||||
pydocmd simple jc.parsers.csv+ > ../docs/parsers/csv.md
|
||||
pydocmd simple jc.parsers.df+ > ../docs/parsers/df.md
|
||||
pydocmd simple jc.parsers.dig+ > ../docs/parsers/dig.md
|
||||
pydocmd simple jc.parsers.dmidecode+ > ../docs/parsers/dmidecode.md
|
||||
pydocmd simple jc.parsers.du+ > ../docs/parsers/du.md
|
||||
pydocmd simple jc.parsers.env+ > ../docs/parsers/env.md
|
||||
pydocmd simple jc.parsers.file+ > ../docs/parsers/file.md
|
||||
pydocmd simple jc.parsers.free+ > ../docs/parsers/free.md
|
||||
pydocmd simple jc.parsers.fstab+ > ../docs/parsers/fstab.md
|
||||
pydocmd simple jc.parsers.group+ > ../docs/parsers/group.md
|
||||
pydocmd simple jc.parsers.gshadow+ > ../docs/parsers/gshadow.md
|
||||
pydocmd simple jc.parsers.history+ > ../docs/parsers/history.md
|
||||
pydocmd simple jc.parsers.hosts+ > ../docs/parsers/hosts.md
|
||||
pydocmd simple jc.parsers.id+ > ../docs/parsers/id.md
|
||||
@@ -20,24 +28,34 @@ pydocmd simple jc.parsers.ifconfig+ > ../docs/parsers/ifconfig.md
|
||||
pydocmd simple jc.parsers.ini+ > ../docs/parsers/ini.md
|
||||
pydocmd simple jc.parsers.iptables+ > ../docs/parsers/iptables.md
|
||||
pydocmd simple jc.parsers.jobs+ > ../docs/parsers/jobs.md
|
||||
pydocmd simple jc.parsers.last+ > ../docs/parsers/last.md
|
||||
pydocmd simple jc.parsers.ls+ > ../docs/parsers/ls.md
|
||||
pydocmd simple jc.parsers.lsblk+ > ../docs/parsers/lsblk.md
|
||||
pydocmd simple jc.parsers.lsmod+ > ../docs/parsers/lsmod.md
|
||||
pydocmd simple jc.parsers.lsof+ > ../docs/parsers/lsof.md
|
||||
pydocmd simple jc.parsers.mount+ > ../docs/parsers/mount.md
|
||||
pydocmd simple jc.parsers.netstat+ > ../docs/parsers/netstat.md
|
||||
pydocmd simple jc.parsers.ntpq+ > ../docs/parsers/ntpq.md
|
||||
pydocmd simple jc.parsers.passwd+ > ../docs/parsers/passwd.md
|
||||
pydocmd simple jc.parsers.ping+ > ../docs/parsers/ping.md
|
||||
pydocmd simple jc.parsers.pip_list+ > ../docs/parsers/pip_list.md
|
||||
pydocmd simple jc.parsers.pip_show+ > ../docs/parsers/pip_show.md
|
||||
pydocmd simple jc.parsers.ps+ > ../docs/parsers/ps.md
|
||||
pydocmd simple jc.parsers.route+ > ../docs/parsers/route.md
|
||||
pydocmd simple jc.parsers.shadow+ > ../docs/parsers/shadow.md
|
||||
pydocmd simple jc.parsers.ss+ > ../docs/parsers/ss.md
|
||||
pydocmd simple jc.parsers.stat+ > ../docs/parsers/stat.md
|
||||
pydocmd simple jc.parsers.sysctl+ > ../docs/parsers/sysctl.md
|
||||
pydocmd simple jc.parsers.systemctl+ > ../docs/parsers/systemctl.md
|
||||
pydocmd simple jc.parsers.systemctl_lj+ > ../docs/parsers/systemctl_lj.md
|
||||
pydocmd simple jc.parsers.systemctl_ls+ > ../docs/parsers/systemctl_ls.md
|
||||
pydocmd simple jc.parsers.systemctl_luf+ > ../docs/parsers/systemctl_luf.md
|
||||
pydocmd simple jc.parsers.timedatectl+ > ../docs/parsers/timedatectl.md
|
||||
pydocmd simple jc.parsers.tracepath+ > ../docs/parsers/tracepath.md
|
||||
pydocmd simple jc.parsers.traceroute+ > ../docs/parsers/traceroute.md
|
||||
pydocmd simple jc.parsers.uname+ > ../docs/parsers/uname.md
|
||||
pydocmd simple jc.parsers.uptime+ > ../docs/parsers/uptime.md
|
||||
pydocmd simple jc.parsers.w+ > ../docs/parsers/w.md
|
||||
pydocmd simple jc.parsers.who+ > ../docs/parsers/who.md
|
||||
pydocmd simple jc.parsers.xml+ > ../docs/parsers/xml.md
|
||||
pydocmd simple jc.parsers.yaml+ > ../docs/parsers/yaml.md
|
||||
|
||||
109
docs/parsers/airport.md
Normal file
109
docs/parsers/airport.md
Normal file
@@ -0,0 +1,109 @@
|
||||
# jc.parsers.airport
|
||||
jc - JSON CLI output utility airport -I Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --airport as the first argument if the piped input is coming from airport -I (OSX)
|
||||
|
||||
This program can be found at:
|
||||
/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport
|
||||
|
||||
Compatibility:
|
||||
|
||||
'darwin'
|
||||
|
||||
Examples:
|
||||
|
||||
$ airport -I | jc --airport -p
|
||||
{
|
||||
"agrctlrssi": -66,
|
||||
"agrextrssi": 0,
|
||||
"agrctlnoise": -90,
|
||||
"agrextnoise": 0,
|
||||
"state": "running",
|
||||
"op_mode": "station",
|
||||
"lasttxrate": 195,
|
||||
"maxrate": 867,
|
||||
"lastassocstatus": 0,
|
||||
"802_11_auth": "open",
|
||||
"link_auth": "wpa2-psk",
|
||||
"bssid": "3c:37:86:15:ad:f9",
|
||||
"ssid": "SnazzleDazzle",
|
||||
"mcs": 0,
|
||||
"channel": "48,80"
|
||||
}
|
||||
|
||||
$ airport -I | jc --airport -p -r
|
||||
{
|
||||
"agrctlrssi": "-66",
|
||||
"agrextrssi": "0",
|
||||
"agrctlnoise": "-90",
|
||||
"agrextnoise": "0",
|
||||
"state": "running",
|
||||
"op_mode": "station",
|
||||
"lasttxrate": "195",
|
||||
"maxrate": "867",
|
||||
"lastassocstatus": "0",
|
||||
"802_11_auth": "open",
|
||||
"link_auth": "wpa2-psk",
|
||||
"bssid": "3c:37:86:15:ad:f9",
|
||||
"ssid": "SnazzleDazzle",
|
||||
"mcs": "0",
|
||||
"channel": "48,80"
|
||||
}
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured data with the following schema:
|
||||
|
||||
{
|
||||
"agrctlrssi": integer,
|
||||
"agrextrssi": integer,
|
||||
"agrctlnoise": integer,
|
||||
"agrextnoise": integer,
|
||||
"state": string,
|
||||
"op_mode": string,
|
||||
"lasttxrate": integer,
|
||||
"maxrate": integer,
|
||||
"lastassocstatus": integer,
|
||||
"802_11_auth": string,
|
||||
"link_auth": string,
|
||||
"bssid": string,
|
||||
"ssid": string,
|
||||
"mcs": integer,
|
||||
"channel": string
|
||||
}
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
136
docs/parsers/airport_s.md
Normal file
136
docs/parsers/airport_s.md
Normal file
@@ -0,0 +1,136 @@
|
||||
# jc.parsers.airport_s
|
||||
jc - JSON CLI output utility airport -s Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --airport as the first argument if the piped input is coming from airport -s (OSX)
|
||||
|
||||
This program can be found at:
|
||||
/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport
|
||||
|
||||
Compatibility:
|
||||
|
||||
'darwin'
|
||||
|
||||
Examples:
|
||||
|
||||
$ airport -s | jc --airport-s -p
|
||||
[
|
||||
{
|
||||
"ssid": "DIRECT-4A-HP OfficeJet 3830",
|
||||
"bssid": "00:67:eb:2a:a7:3b",
|
||||
"rssi": -90,
|
||||
"channel": "6",
|
||||
"ht": true,
|
||||
"cc": "--",
|
||||
"security": [
|
||||
"WPA2(PSK/AES/AES)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ssid": "Latitude38",
|
||||
"bssid": "c0:ff:d5:d2:7a:f3",
|
||||
"rssi": -85,
|
||||
"channel": "11",
|
||||
"ht": true,
|
||||
"cc": "US",
|
||||
"security": [
|
||||
"WPA2(PSK/AES/AES)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ssid": "xfinitywifi",
|
||||
"bssid": "6e:e3:0e:b8:45:99",
|
||||
"rssi": -83,
|
||||
"channel": "11",
|
||||
"ht": true,
|
||||
"cc": "US",
|
||||
"security": [
|
||||
"NONE"
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ airport -s | jc --airport -p -r
|
||||
[
|
||||
{
|
||||
"ssid": "DIRECT-F3-HP ENVY 5660 series",
|
||||
"bssid": "b0:5a:da:6f:0a:d4",
|
||||
"rssi": "-93",
|
||||
"channel": "1",
|
||||
"ht": "Y",
|
||||
"cc": "--",
|
||||
"security": "WPA2(PSK/AES/AES)"
|
||||
},
|
||||
{
|
||||
"ssid": "YouAreInfected-5",
|
||||
"bssid": "5c:e3:0e:c2:85:da",
|
||||
"rssi": "-85",
|
||||
"channel": "36",
|
||||
"ht": "Y",
|
||||
"cc": "US",
|
||||
"security": "WPA(PSK/AES,TKIP/TKIP) WPA2(PSK/AES,TKIP/TKIP)"
|
||||
},
|
||||
{
|
||||
"ssid": "YuanFamily",
|
||||
"bssid": "5c:e3:0e:b8:5f:9a",
|
||||
"rssi": "-84",
|
||||
"channel": "11",
|
||||
"ht": "Y",
|
||||
"cc": "US",
|
||||
"security": "WPA(PSK/AES,TKIP/TKIP) WPA2(PSK/AES,TKIP/TKIP)"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
[
|
||||
{
|
||||
"ssid": string,
|
||||
"bssid": string,
|
||||
"rssi": integer,
|
||||
"channel": string,
|
||||
"ht": boolean,
|
||||
"cc": string,
|
||||
"security": [
|
||||
string,
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
@@ -59,6 +59,8 @@ Examples:
|
||||
"hwtype": "ether",
|
||||
"hwaddress": "00:50:56:f0:98:26",
|
||||
"iface": "ens33"
|
||||
"permanent": false,
|
||||
"expires": 1182
|
||||
},
|
||||
{
|
||||
"name": "gateway",
|
||||
@@ -66,6 +68,8 @@ Examples:
|
||||
"hwtype": "ether",
|
||||
"hwaddress": "00:50:56:f7:4a:fc",
|
||||
"iface": "ens33"
|
||||
"permanent": false,
|
||||
"expires": 110
|
||||
}
|
||||
]
|
||||
|
||||
@@ -77,6 +81,8 @@ Examples:
|
||||
"hwtype": "ether",
|
||||
"hwaddress": "00:50:56:fe:7a:b4",
|
||||
"iface": "ens33"
|
||||
"permanent": false,
|
||||
"expires": "1182"
|
||||
},
|
||||
{
|
||||
"name": "_gateway",
|
||||
@@ -84,6 +90,8 @@ Examples:
|
||||
"hwtype": "ether",
|
||||
"hwaddress": "00:50:56:f7:4a:fc",
|
||||
"iface": "ens33"
|
||||
"permanent": false,
|
||||
"expires": "110"
|
||||
}
|
||||
]
|
||||
|
||||
@@ -114,7 +122,9 @@ Returns:
|
||||
"hwtype": string,
|
||||
"hwaddress": string,
|
||||
"flags_mask": string,
|
||||
"iface": string
|
||||
"iface": string,
|
||||
"permanent": boolean,
|
||||
"expires": integer
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
148
docs/parsers/blkid.md
Normal file
148
docs/parsers/blkid.md
Normal file
@@ -0,0 +1,148 @@
|
||||
# jc.parsers.blkid
|
||||
jc - JSON CLI output utility blkid Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --blkid as the first argument if the piped input is coming from blkid
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
|
||||
Examples:
|
||||
|
||||
$ blkid | jc --blkid -p
|
||||
[
|
||||
{
|
||||
"device": "/dev/sda1",
|
||||
"uuid": "05d927ab-5875-49e4-ada1-7f46cb32c932",
|
||||
"type": "xfs"
|
||||
},
|
||||
{
|
||||
"device": "/dev/sda2",
|
||||
"uuid": "3klkIj-w1kk-DkJi-0XBJ-y3i7-i2Ac-vHqWBM",
|
||||
"type": "LVM2_member"
|
||||
},
|
||||
{
|
||||
"device": "/dev/mapper/centos-root",
|
||||
"uuid": "07d718ff-950c-4e5b-98f0-42a1147c77d9",
|
||||
"type": "xfs"
|
||||
},
|
||||
{
|
||||
"device": "/dev/mapper/centos-swap",
|
||||
"uuid": "615eb89a-bcbf-46fd-80e3-c483ff5c931f",
|
||||
"type": "swap"
|
||||
}
|
||||
]
|
||||
|
||||
$ sudo blkid -o udev -ip /dev/sda2 | jc --blkid -p
|
||||
[
|
||||
{
|
||||
"id_fs_uuid": "3klkIj-w1kk-DkJi-0XBJ-y3i7-i2Ac-vHqWBM",
|
||||
"id_fs_uuid_enc": "3klkIj-w1kk-DkJi-0XBJ-y3i7-i2Ac-vHqWBM",
|
||||
"id_fs_version": "LVM2\x20001",
|
||||
"id_fs_type": "LVM2_member",
|
||||
"id_fs_usage": "raid",
|
||||
"id_iolimit_minimum_io_size": 512,
|
||||
"id_iolimit_physical_sector_size": 512,
|
||||
"id_iolimit_logical_sector_size": 512,
|
||||
"id_part_entry_scheme": "dos",
|
||||
"id_part_entry_type": "0x8e",
|
||||
"id_part_entry_number": 2,
|
||||
"id_part_entry_offset": 2099200,
|
||||
"id_part_entry_size": 39843840,
|
||||
"id_part_entry_disk": "8:0"
|
||||
}
|
||||
]
|
||||
|
||||
$ sudo blkid -ip /dev/sda1 | jc --blkid -p -r
|
||||
[
|
||||
{
|
||||
"devname": "/dev/sda1",
|
||||
"uuid": "05d927bb-5875-49e3-ada1-7f46cb31c932",
|
||||
"type": "xfs",
|
||||
"usage": "filesystem",
|
||||
"minimum_io_size": "512",
|
||||
"physical_sector_size": "512",
|
||||
"logical_sector_size": "512",
|
||||
"part_entry_scheme": "dos",
|
||||
"part_entry_type": "0x83",
|
||||
"part_entry_flags": "0x80",
|
||||
"part_entry_number": "1",
|
||||
"part_entry_offset": "2048",
|
||||
"part_entry_size": "2097152",
|
||||
"part_entry_disk": "8:0"
|
||||
}
|
||||
]
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"device": string,
|
||||
"uuid": string,
|
||||
"type": string,
|
||||
"usage": string,
|
||||
"part_entry_scheme": string,
|
||||
"part_entry_type": string,
|
||||
"part_entry_flags": string,
|
||||
"part_entry_number": integer,
|
||||
"part_entry_offset": integer,
|
||||
"part_entry_size": integer,
|
||||
"part_entry_disk": string,
|
||||
"id_fs_uuid": string,
|
||||
"id_fs_uuid_enc": string,
|
||||
"id_fs_version": string,
|
||||
"id_fs_type": string,
|
||||
"id_fs_usage": string,
|
||||
"id_part_entry_scheme": string,
|
||||
"id_part_entry_type": string,
|
||||
"id_part_entry_flags": string,
|
||||
"id_part_entry_number": integer,
|
||||
"id_part_entry_offset": integer,
|
||||
"id_part_entry_size": integer,
|
||||
"id_iolimit_minimum_io_size": integer,
|
||||
"id_iolimit_physical_sector_size": integer,
|
||||
"id_iolimit_logical_sector_size": integer,
|
||||
"id_part_entry_disk": string,
|
||||
"minimum_io_size": integer,
|
||||
"physical_sector_size": integer,
|
||||
"logical_sector_size": integer
|
||||
}
|
||||
]
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
105
docs/parsers/csv.md
Normal file
105
docs/parsers/csv.md
Normal file
@@ -0,0 +1,105 @@
|
||||
# jc.parsers.csv
|
||||
jc - JSON CLI output utility csv Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --csv as the first argument if the piped input is coming from a csv file.
|
||||
the csv parser will attempt to automatically detect the delimiter character.
|
||||
if the delimiter cannot be detected it will default to comma.
|
||||
the first row of the file must be a header row.
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat homes.csv
|
||||
"Sell", "List", "Living", "Rooms", "Beds", "Baths", "Age", "Acres", "Taxes"
|
||||
142, 160, 28, 10, 5, 3, 60, 0.28, 3167
|
||||
175, 180, 18, 8, 4, 1, 12, 0.43, 4033
|
||||
129, 132, 13, 6, 3, 1, 41, 0.33, 1471
|
||||
...
|
||||
|
||||
$ cat homes.csv | jc --csv -p
|
||||
[
|
||||
{
|
||||
"Sell": "142",
|
||||
"List": "160",
|
||||
"Living": "28",
|
||||
"Rooms": "10",
|
||||
"Beds": "5",
|
||||
"Baths": "3",
|
||||
"Age": "60",
|
||||
"Acres": "0.28",
|
||||
"Taxes": "3167"
|
||||
},
|
||||
{
|
||||
"Sell": "175",
|
||||
"List": "180",
|
||||
"Living": "18",
|
||||
"Rooms": "8",
|
||||
"Beds": "4",
|
||||
"Baths": "1",
|
||||
"Age": "12",
|
||||
"Acres": "0.43",
|
||||
"Taxes": "4033"
|
||||
},
|
||||
{
|
||||
"Sell": "129",
|
||||
"List": "132",
|
||||
"Living": "13",
|
||||
"Rooms": "6",
|
||||
"Beds": "3",
|
||||
"Baths": "1",
|
||||
"Age": "41",
|
||||
"Acres": "0.33",
|
||||
"Taxes": "1471"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Each dictionary represents a row in the csv file:
|
||||
|
||||
[
|
||||
{
|
||||
csv file converted to a Dictionary
|
||||
https://docs.python.org/3/library/csv.html
|
||||
}
|
||||
]
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
@@ -7,7 +7,7 @@ Usage:
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin'
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
|
||||
@@ -353,6 +353,15 @@ Returns:
|
||||
"answer_num": integer,
|
||||
"authority_num": integer,
|
||||
"additional_num": integer,
|
||||
"axfr": [
|
||||
{
|
||||
"name": string,
|
||||
"class": string,
|
||||
"type": string,
|
||||
"ttl": integer,
|
||||
"data": string
|
||||
}
|
||||
],
|
||||
"question": {
|
||||
"name": string,
|
||||
"class": string,
|
||||
@@ -380,6 +389,7 @@ Returns:
|
||||
"server": string,
|
||||
"when": string,
|
||||
"rcvd": integer
|
||||
"size": string
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
153
docs/parsers/dmidecode.md
Normal file
153
docs/parsers/dmidecode.md
Normal file
@@ -0,0 +1,153 @@
|
||||
# jc.parsers.dmidecode
|
||||
jc - JSON CLI output utility dmidecode Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --dmidecode as the first argument if the piped input is coming from dmidecode
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
|
||||
Examples:
|
||||
|
||||
# dmidecode | jc --dmidecode -p
|
||||
[
|
||||
{
|
||||
"handle": "0x0000",
|
||||
"type": 0,
|
||||
"bytes": 24,
|
||||
"description": "BIOS Information",
|
||||
"values": {
|
||||
"vendor": "Phoenix Technologies LTD",
|
||||
"version": "6.00",
|
||||
"release_date": "04/13/2018",
|
||||
"address": "0xEA490",
|
||||
"runtime_size": "88944 bytes",
|
||||
"rom_size": "64 kB",
|
||||
"characteristics": [
|
||||
"ISA is supported",
|
||||
"PCI is supported",
|
||||
"PC Card (PCMCIA) is supported",
|
||||
"PNP is supported",
|
||||
"APM is supported",
|
||||
"BIOS is upgradeable",
|
||||
"BIOS shadowing is allowed",
|
||||
"ESCD support is available",
|
||||
"Boot from CD is supported",
|
||||
"Selectable boot is supported",
|
||||
"EDD is supported",
|
||||
"Print screen service is supported (int 5h)",
|
||||
"8042 keyboard services are supported (int 9h)",
|
||||
"Serial services are supported (int 14h)",
|
||||
"Printer services are supported (int 17h)",
|
||||
"CGA/mono video services are supported (int 10h)",
|
||||
"ACPI is supported",
|
||||
"Smart battery is supported",
|
||||
"BIOS boot specification is supported",
|
||||
"Function key-initiated network boot is supported",
|
||||
"Targeted content distribution is supported"
|
||||
],
|
||||
"bios_revision": "4.6",
|
||||
"firmware_revision": "0.0"
|
||||
}
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
# dmidecode | jc --dmidecode -p -r
|
||||
[
|
||||
{
|
||||
"handle": "0x0000",
|
||||
"type": "0",
|
||||
"bytes": "24",
|
||||
"description": "BIOS Information",
|
||||
"values": {
|
||||
"vendor": "Phoenix Technologies LTD",
|
||||
"version": "6.00",
|
||||
"release_date": "04/13/2018",
|
||||
"address": "0xEA490",
|
||||
"runtime_size": "88944 bytes",
|
||||
"rom_size": "64 kB",
|
||||
"characteristics": [
|
||||
"ISA is supported",
|
||||
"PCI is supported",
|
||||
"PC Card (PCMCIA) is supported",
|
||||
"PNP is supported",
|
||||
"APM is supported",
|
||||
"BIOS is upgradeable",
|
||||
"BIOS shadowing is allowed",
|
||||
"ESCD support is available",
|
||||
"Boot from CD is supported",
|
||||
"Selectable boot is supported",
|
||||
"EDD is supported",
|
||||
"Print screen service is supported (int 5h)",
|
||||
"8042 keyboard services are supported (int 9h)",
|
||||
"Serial services are supported (int 14h)",
|
||||
"Printer services are supported (int 17h)",
|
||||
"CGA/mono video services are supported (int 10h)",
|
||||
"ACPI is supported",
|
||||
"Smart battery is supported",
|
||||
"BIOS boot specification is supported",
|
||||
"Function key-initiated network boot is supported",
|
||||
"Targeted content distribution is supported"
|
||||
],
|
||||
"bios_revision": "4.6",
|
||||
"firmware_revision": "0.0"
|
||||
}
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"handle": string,
|
||||
"type": integer,
|
||||
"bytes": integer,
|
||||
"description": string,
|
||||
"values": { (null if empty)
|
||||
"lowercase_no_spaces_keys": string,
|
||||
"multiline_key_values": [
|
||||
string,
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
90
docs/parsers/file.md
Normal file
90
docs/parsers/file.md
Normal file
@@ -0,0 +1,90 @@
|
||||
# jc.parsers.file
|
||||
jc - JSON CLI output utility file command Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --file as the first argument if the piped input is coming from file.
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'aix', 'freebsd', 'darwin'
|
||||
|
||||
Examples:
|
||||
|
||||
$ file * | jc --file -p
|
||||
[
|
||||
{
|
||||
"filename": "Applications",
|
||||
"type": "directory"
|
||||
},
|
||||
{
|
||||
"filename": "another file with spaces",
|
||||
"type": "empty"
|
||||
},
|
||||
{
|
||||
"filename": "argstest.py",
|
||||
"type": "Python script text executable, ASCII text"
|
||||
},
|
||||
{
|
||||
"filename": "blkid-p.out",
|
||||
"type": "ASCII text"
|
||||
},
|
||||
{
|
||||
"filename": "blkid-pi.out",
|
||||
"type": "ASCII text, with very long lines"
|
||||
},
|
||||
{
|
||||
"filename": "cd_catalog.xml",
|
||||
"type": "XML 1.0 document text, ASCII text, with CRLF line terminators"
|
||||
},
|
||||
{
|
||||
"filename": "centosserial.sh",
|
||||
"type": "Bourne-Again shell script text executable, UTF-8 Unicode text"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"filename": string,
|
||||
"type ": string
|
||||
}
|
||||
]
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
@@ -7,7 +7,7 @@ Usage:
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
'linux', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
|
||||
141
docs/parsers/group.md
Normal file
141
docs/parsers/group.md
Normal file
@@ -0,0 +1,141 @@
|
||||
# jc.parsers.group
|
||||
jc - JSON CLI output utility /etc/group file Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --group as the first argument if the piped input is coming from /etc/group
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /etc/group | jc --group -p
|
||||
[
|
||||
{
|
||||
"group_name": "nobody",
|
||||
"password": "*",
|
||||
"gid": -2,
|
||||
"members": []
|
||||
},
|
||||
{
|
||||
"group_name": "nogroup",
|
||||
"password": "*",
|
||||
"gid": -1,
|
||||
"members": []
|
||||
},
|
||||
{
|
||||
"group_name": "wheel",
|
||||
"password": "*",
|
||||
"gid": 0,
|
||||
"members": [
|
||||
"root"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "certusers",
|
||||
"password": "*",
|
||||
"gid": 29,
|
||||
"members": [
|
||||
"root",
|
||||
"_jabber",
|
||||
"_postfix",
|
||||
"_cyrus",
|
||||
"_calendar",
|
||||
"_dovecot"
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ cat /etc/group | jc --group -p -r
|
||||
[
|
||||
{
|
||||
"group_name": "nobody",
|
||||
"password": "*",
|
||||
"gid": "-2",
|
||||
"members": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "nogroup",
|
||||
"password": "*",
|
||||
"gid": "-1",
|
||||
"members": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "wheel",
|
||||
"password": "*",
|
||||
"gid": "0",
|
||||
"members": [
|
||||
"root"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "certusers",
|
||||
"password": "*",
|
||||
"gid": "29",
|
||||
"members": [
|
||||
"root",
|
||||
"_jabber",
|
||||
"_postfix",
|
||||
"_cyrus",
|
||||
"_calendar",
|
||||
"_dovecot"
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"group_name": string,
|
||||
"password": string,
|
||||
"gid": integer,
|
||||
"members": [
|
||||
string
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
109
docs/parsers/gshadow.md
Normal file
109
docs/parsers/gshadow.md
Normal file
@@ -0,0 +1,109 @@
|
||||
# jc.parsers.gshadow
|
||||
jc - JSON CLI output utility /etc/gshadow file Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --gshadow as the first argument if the piped input is coming from /etc/gshadow
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /etc/gshadow | jc --gshadow -p
|
||||
[
|
||||
{
|
||||
"group_name": "root",
|
||||
"password": "*",
|
||||
"administrators": [],
|
||||
"members": []
|
||||
},
|
||||
{
|
||||
"group_name": "adm",
|
||||
"password": "*",
|
||||
"administrators": [],
|
||||
"members": [
|
||||
"syslog",
|
||||
"joeuser"
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ cat /etc/gshadow | jc --gshadow -p -r
|
||||
[
|
||||
{
|
||||
"group_name": "root",
|
||||
"password": "*",
|
||||
"administrators": [
|
||||
""
|
||||
],
|
||||
"members": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "adm",
|
||||
"password": "*",
|
||||
"administrators": [
|
||||
""
|
||||
],
|
||||
"members": [
|
||||
"syslog",
|
||||
"joeuser"
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"group_name": string,
|
||||
"password": string,
|
||||
"administrators": [
|
||||
string
|
||||
],
|
||||
"members": [
|
||||
string
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
@@ -147,6 +147,17 @@ Examples:
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## IfconfigParser
|
||||
```python
|
||||
IfconfigParser(self, console_output)
|
||||
```
|
||||
|
||||
## InterfaceNotFound
|
||||
```python
|
||||
InterfaceNotFound(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
|
||||
@@ -3,7 +3,9 @@ jc - JSON CLI output utility INI Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --ini as the first argument if the piped input is coming from an INI file
|
||||
Specify --ini as the first argument if the piped input is coming from an INI file or any
|
||||
simple key/value pair file. Delimiter can be '=' or ':'. Missing values are supported.
|
||||
Comment prefix can be '#' or ';'. Comments must be on their own line.
|
||||
|
||||
Compatibility:
|
||||
|
||||
@@ -61,11 +63,14 @@ Parameters:
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary representing an ini document:
|
||||
Dictionary representing an ini or simple key/value pair document:
|
||||
|
||||
{
|
||||
ini document converted to a dictionary
|
||||
see configparser standard library documentation for more details
|
||||
ini or key/value document converted to a dictionary - see configparser standard
|
||||
library documentation for more details.
|
||||
|
||||
Note: Values starting and ending with quotation marks will have the marks removed.
|
||||
If you would like to keep the quotation marks, use the -r or raw=True argument.
|
||||
}
|
||||
|
||||
## parse
|
||||
|
||||
118
docs/parsers/last.md
Normal file
118
docs/parsers/last.md
Normal file
@@ -0,0 +1,118 @@
|
||||
# jc.parsers.last
|
||||
jc - JSON CLI output utility last Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --last as the first argument if the piped input is coming from last or lastb
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ last | jc --last -p
|
||||
[
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys002",
|
||||
"hostname": null,
|
||||
"login": "Thu Feb 27 14:31",
|
||||
"logout": "still logged in"
|
||||
},
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys003",
|
||||
"hostname": null,
|
||||
"login": "Thu Feb 27 10:38",
|
||||
"logout": "10:38",
|
||||
"duration": "00:00"
|
||||
},
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys003",
|
||||
"hostname": null,
|
||||
"login": "Thu Feb 27 10:18",
|
||||
"logout": "10:18",
|
||||
"duration": "00:00"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ last | jc --last -p -r
|
||||
[
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys002",
|
||||
"hostname": "-",
|
||||
"login": "Thu Feb 27 14:31",
|
||||
"logout": "still_logged_in"
|
||||
},
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys003",
|
||||
"hostname": "-",
|
||||
"login": "Thu Feb 27 10:38",
|
||||
"logout": "10:38",
|
||||
"duration": "00:00"
|
||||
},
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys003",
|
||||
"hostname": "-",
|
||||
"login": "Thu Feb 27 10:18",
|
||||
"logout": "10:18",
|
||||
"duration": "00:00"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"user": string,
|
||||
"tty": string,
|
||||
"hostname": string,
|
||||
"login": string,
|
||||
"logout": string,
|
||||
"duration": string
|
||||
}
|
||||
]
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
@@ -1,20 +1,21 @@
|
||||
# jc.parsers.ls
|
||||
jc - JSON CLI output utility ls Parser
|
||||
|
||||
Note: The -l option of ls should be used to correctly parse filenames that include newline characters.
|
||||
Note: The -l or -b option of ls should be used to correctly parse filenames that include newline characters.
|
||||
Since ls does not encode newlines in filenames when outputting to a pipe it will cause jc to see
|
||||
multiple files instead of a single file if -l is not used.
|
||||
multiple files instead of a single file if -l or -b is not used.
|
||||
|
||||
Usage:
|
||||
|
||||
specify --ls as the first argument if the piped input is coming from ls
|
||||
|
||||
ls options supported:
|
||||
- laR
|
||||
|
||||
-lbaR
|
||||
--time-style=full-iso
|
||||
- h file sizes will be available in text form with -r but larger file sizes
|
||||
with human readable suffixes will be converted to Null in default view
|
||||
since the parser attempts to convert this field to an integer.
|
||||
-h file sizes will be available in text form with -r but larger file sizes
|
||||
with human readable suffixes will be converted to Null in default view
|
||||
since the parser attempts to convert this field to an integer.
|
||||
|
||||
Compatibility:
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ Usage:
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin'
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Example:
|
||||
|
||||
|
||||
@@ -5,13 +5,18 @@ Usage:
|
||||
|
||||
Specify --netstat as the first argument if the piped input is coming from netstat
|
||||
|
||||
Caveats:
|
||||
|
||||
- Use of multiple 'l' options is not supported on OSX (e.g. 'netstat -rlll')
|
||||
- Use of the 'A' option is not supported on OSX when using the 'r' option (e.g. netstat -rA)
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ sudo netstat -apee | jc --netstat -p
|
||||
# netstat -apee | jc --netstat -p
|
||||
[
|
||||
{
|
||||
"proto": "tcp",
|
||||
@@ -161,152 +166,83 @@ Examples:
|
||||
...
|
||||
]
|
||||
|
||||
$ sudo netstat -apee | jc --netstat -p -r
|
||||
$ netstat -r | jc --netstat -p
|
||||
[
|
||||
{
|
||||
"proto": "tcp",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "localhost",
|
||||
"foreign_address": "0.0.0.0",
|
||||
"state": "LISTEN",
|
||||
"user": "systemd-resolve",
|
||||
"inode": "26958",
|
||||
"program_name": "systemd-resolve",
|
||||
"kind": "network",
|
||||
"pid": "887",
|
||||
"local_port": "domain",
|
||||
"foreign_port": "*",
|
||||
"transport_protocol": "tcp",
|
||||
"network_protocol": "ipv4"
|
||||
"destination": "default",
|
||||
"gateway": "gateway",
|
||||
"genmask": "0.0.0.0",
|
||||
"route_flags": "UG",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0,
|
||||
"iface": "ens33",
|
||||
"kind": "route",
|
||||
"route_flags_pretty": [
|
||||
"UP",
|
||||
"GATEWAY"
|
||||
]
|
||||
},
|
||||
{
|
||||
"proto": "tcp",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "0.0.0.0",
|
||||
"foreign_address": "0.0.0.0",
|
||||
"state": "LISTEN",
|
||||
"user": "root",
|
||||
"inode": "30499",
|
||||
"program_name": "sshd",
|
||||
"kind": "network",
|
||||
"pid": "1186",
|
||||
"local_port": "ssh",
|
||||
"foreign_port": "*",
|
||||
"transport_protocol": "tcp",
|
||||
"network_protocol": "ipv4"
|
||||
"destination": "172.17.0.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.0.0",
|
||||
"route_flags": "U",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0,
|
||||
"iface": "docker0",
|
||||
"kind": "route",
|
||||
"route_flags_pretty": [
|
||||
"UP"
|
||||
]
|
||||
},
|
||||
{
|
||||
"proto": "tcp",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "localhost",
|
||||
"foreign_address": "localhost",
|
||||
"state": "ESTABLISHED",
|
||||
"user": "root",
|
||||
"inode": "46829",
|
||||
"program_name": "sshd: root",
|
||||
"kind": "network",
|
||||
"pid": "2242",
|
||||
"local_port": "ssh",
|
||||
"foreign_port": "52186",
|
||||
"transport_protocol": "tcp",
|
||||
"network_protocol": "ipv4"
|
||||
"destination": "192.168.71.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.255.0",
|
||||
"route_flags": "U",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0,
|
||||
"iface": "ens33",
|
||||
"kind": "route",
|
||||
"route_flags_pretty": [
|
||||
"UP"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
$ netstat -i | jc --netstat -p
|
||||
[
|
||||
{
|
||||
"iface": "ens33",
|
||||
"mtu": 1500,
|
||||
"rx_ok": 476,
|
||||
"rx_err": 0,
|
||||
"rx_drp": 0,
|
||||
"rx_ovr": 0,
|
||||
"tx_ok": 312,
|
||||
"tx_err": 0,
|
||||
"tx_drp": 0,
|
||||
"tx_ovr": 0,
|
||||
"flg": "BMRU",
|
||||
"kind": "interface"
|
||||
},
|
||||
{
|
||||
"proto": "tcp",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "localhost",
|
||||
"foreign_address": "localhost",
|
||||
"state": "ESTABLISHED",
|
||||
"user": "root",
|
||||
"inode": "46828",
|
||||
"program_name": "ssh",
|
||||
"kind": "network",
|
||||
"pid": "2241",
|
||||
"local_port": "52186",
|
||||
"foreign_port": "ssh",
|
||||
"transport_protocol": "tcp",
|
||||
"network_protocol": "ipv4"
|
||||
},
|
||||
{
|
||||
"proto": "tcp6",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "[::]",
|
||||
"foreign_address": "[::]",
|
||||
"state": "LISTEN",
|
||||
"user": "root",
|
||||
"inode": "30510",
|
||||
"program_name": "sshd",
|
||||
"kind": "network",
|
||||
"pid": "1186",
|
||||
"local_port": "ssh",
|
||||
"foreign_port": "*",
|
||||
"transport_protocol": "tcp",
|
||||
"network_protocol": "ipv6"
|
||||
},
|
||||
{
|
||||
"proto": "udp",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "localhost",
|
||||
"foreign_address": "0.0.0.0",
|
||||
"state": null,
|
||||
"user": "systemd-resolve",
|
||||
"inode": "26957",
|
||||
"program_name": "systemd-resolve",
|
||||
"kind": "network",
|
||||
"pid": "887",
|
||||
"local_port": "domain",
|
||||
"foreign_port": "*",
|
||||
"transport_protocol": "udp",
|
||||
"network_protocol": "ipv4"
|
||||
},
|
||||
{
|
||||
"proto": "raw6",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "[::]",
|
||||
"foreign_address": "[::]",
|
||||
"state": "7",
|
||||
"user": "systemd-network",
|
||||
"inode": "27001",
|
||||
"program_name": "systemd-network",
|
||||
"kind": "network",
|
||||
"pid": "867",
|
||||
"local_port": "ipv6-icmp",
|
||||
"foreign_port": "*",
|
||||
"transport_protocol": null,
|
||||
"network_protocol": "ipv6"
|
||||
},
|
||||
{
|
||||
"proto": "unix",
|
||||
"refcnt": "2",
|
||||
"flags": null,
|
||||
"type": "DGRAM",
|
||||
"state": null,
|
||||
"inode": "33322",
|
||||
"program_name": "systemd",
|
||||
"path": "/run/user/1000/systemd/notify",
|
||||
"kind": "socket",
|
||||
"pid": " 1607"
|
||||
},
|
||||
{
|
||||
"proto": "unix",
|
||||
"refcnt": "2",
|
||||
"flags": "ACC",
|
||||
"type": "SEQPACKET",
|
||||
"state": "LISTENING",
|
||||
"inode": "20835",
|
||||
"program_name": "init",
|
||||
"path": "/run/udev/control",
|
||||
"kind": "socket",
|
||||
"pid": " 1"
|
||||
},
|
||||
...
|
||||
"iface": "lo",
|
||||
"mtu": 65536,
|
||||
"rx_ok": 0,
|
||||
"rx_err": 0,
|
||||
"rx_drp": 0,
|
||||
"rx_ovr": 0,
|
||||
"tx_ok": 0,
|
||||
"tx_err": 0,
|
||||
"tx_drp": 0,
|
||||
"tx_ovr": 0,
|
||||
"flg": "LRU",
|
||||
"kind": "interface"
|
||||
}
|
||||
]
|
||||
|
||||
## info
|
||||
@@ -331,28 +267,100 @@ Returns:
|
||||
|
||||
[
|
||||
{
|
||||
"proto": string,
|
||||
"recv_q": integer,
|
||||
"send_q": integer,
|
||||
        "transport_protocol": string,
|
||||
"network_protocol": string,
|
||||
"local_address": string,
|
||||
"local_port": string,
|
||||
"local_port_num": integer,
|
||||
"foreign_address": string,
|
||||
"foreign_port": string,
|
||||
"foreign_port_num": integer,
|
||||
"state": string,
|
||||
"program_name": string,
|
||||
"pid": integer,
|
||||
"user": string,
|
||||
"security_context": string,
|
||||
"refcnt": integer,
|
||||
"flags": string,
|
||||
"type": string,
|
||||
"inode": integer,
|
||||
"path": string,
|
||||
"kind": string
|
||||
"proto": string,
|
||||
"recv_q": integer,
|
||||
"send_q": integer,
|
||||
        "transport_protocol": string,
|
||||
"network_protocol": string,
|
||||
"local_address": string,
|
||||
"local_port": string,
|
||||
"local_port_num": integer,
|
||||
"foreign_address": string,
|
||||
"foreign_port": string,
|
||||
"foreign_port_num": integer,
|
||||
"state": string,
|
||||
"program_name": string,
|
||||
"pid": integer,
|
||||
"user": string,
|
||||
"security_context": string,
|
||||
"refcnt": integer,
|
||||
"flags": string,
|
||||
"type": string,
|
||||
"inode": integer,
|
||||
"path": string,
|
||||
"kind": string,
|
||||
"address": string,
|
||||
"unix_inode": string,
|
||||
"conn": string,
|
||||
"refs": string,
|
||||
"nextref": string,
|
||||
"name": string,
|
||||
"unit": integer,
|
||||
"vendor": integer,
|
||||
"class": integer,
|
||||
"subcla": integer,
|
||||
"unix_flags": integer,
|
||||
"pcbcount": integer,
|
||||
"rcvbuf": integer,
|
||||
"sndbuf": integer,
|
||||
"rxbytes": integer,
|
||||
"txbytes": integer,
|
||||
"destination": string,
|
||||
"gateway": string,
|
||||
"route_flags": string,
|
||||
"route_flags_pretty": [
|
||||
string,
|
||||
]
|
||||
"route_refs": integer,
|
||||
"use": integer,
|
||||
"mtu": integer,
|
||||
"expire": string,
|
||||
"genmask": string,
|
||||
"mss": integer,
|
||||
"window": integer,
|
||||
"irtt": integer,
|
||||
"iface": string,
|
||||
"metric": integer,
|
||||
"network": string,
|
||||
"address": string,
|
||||
        "ipkts": integer,             # - = null
|
||||
        "ierrs": integer,             # - = null
|
||||
        "idrop": integer,             # - = null
|
||||
        "opkts": integer,             # - = null
|
||||
        "oerrs": integer,             # - = null
|
||||
        "coll": integer,              # - = null
|
||||
"rx_ok": integer,
|
||||
"rx_err": integer,
|
||||
"rx_drp": integer,
|
||||
"rx_ovr": integer,
|
||||
"tx_ok": integer,
|
||||
"tx_err": integer,
|
||||
"tx_drp": integer,
|
||||
"tx_ovr": integer,
|
||||
"flg": string,
|
||||
"ibytes": integer,
|
||||
"obytes": integer,
|
||||
"r_mbuf": integer,
|
||||
"s_mbuf": integer,
|
||||
"r_clus": integer,
|
||||
"s_clus": integer,
|
||||
"r_hiwa": integer,
|
||||
"s_hiwa": integer,
|
||||
"r_lowa": integer,
|
||||
"s_lowa": integer,
|
||||
"r_bcnt": integer,
|
||||
"s_bcnt": integer,
|
||||
"r_bmax": integer,
|
||||
"s_bmax": integer,
|
||||
"rexmit": integer,
|
||||
"ooorcv": integer,
|
||||
"0_win": integer,
|
||||
"rexmt": float,
|
||||
"persist": float,
|
||||
"keep": float,
|
||||
"2msl": float,
|
||||
"delack": float,
|
||||
"rcvtime": float,
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
235
docs/parsers/ntpq.md
Normal file
235
docs/parsers/ntpq.md
Normal file
@@ -0,0 +1,235 @@
|
||||
# jc.parsers.ntpq
|
||||
jc - JSON CLI output utility ntpq Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --ntpq as the first argument if the piped input is coming from ntpq -p
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ ntpq -p | jc --ntpq -p
|
||||
[
|
||||
{
|
||||
"remote": "44.190.6.254",
|
||||
"refid": "127.67.113.92",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 1,
|
||||
"poll": 64,
|
||||
"reach": 1,
|
||||
"delay": 23.399,
|
||||
"offset": -2.805,
|
||||
"jitter": 2.131,
|
||||
"state": null
|
||||
},
|
||||
{
|
||||
"remote": "ntp.wdc1.us.lea",
|
||||
"refid": "130.133.1.10",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": null,
|
||||
"poll": 64,
|
||||
"reach": 1,
|
||||
"delay": 93.053,
|
||||
"offset": -0.807,
|
||||
"jitter": 2.839,
|
||||
"state": null
|
||||
},
|
||||
{
|
||||
"remote": "clock.team-cymr",
|
||||
"refid": "204.9.54.119",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": null,
|
||||
"poll": 64,
|
||||
"reach": 1,
|
||||
"delay": 70.337,
|
||||
"offset": -2.909,
|
||||
"jitter": 2.6,
|
||||
"state": null
|
||||
},
|
||||
{
|
||||
"remote": "mirror1.sjc02.s",
|
||||
"refid": "216.218.254.202",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 2,
|
||||
"poll": 64,
|
||||
"reach": 1,
|
||||
"delay": 29.325,
|
||||
"offset": 1.044,
|
||||
"jitter": 4.069,
|
||||
        "state": null
|
||||
}
|
||||
]
|
||||
|
||||
$ ntpq -pn| jc --ntpq -p
|
||||
[
|
||||
{
|
||||
"remote": "44.190.6.254",
|
||||
"refid": "127.67.113.92",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 66,
|
||||
"poll": 64,
|
||||
"reach": 377,
|
||||
"delay": 22.69,
|
||||
"offset": -0.392,
|
||||
"jitter": 2.085,
|
||||
"state": "+"
|
||||
},
|
||||
{
|
||||
"remote": "108.59.2.24",
|
||||
"refid": "130.133.1.10",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 63,
|
||||
"poll": 64,
|
||||
"reach": 377,
|
||||
"delay": 90.805,
|
||||
"offset": 2.84,
|
||||
"jitter": 1.908,
|
||||
"state": "-"
|
||||
},
|
||||
{
|
||||
"remote": "38.229.71.1",
|
||||
"refid": "204.9.54.119",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 64,
|
||||
"poll": 64,
|
||||
"reach": 377,
|
||||
"delay": 68.699,
|
||||
"offset": -0.61,
|
||||
"jitter": 2.576,
|
||||
"state": "+"
|
||||
},
|
||||
{
|
||||
"remote": "72.5.72.15",
|
||||
"refid": "216.218.254.202",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 63,
|
||||
"poll": 64,
|
||||
"reach": 377,
|
||||
"delay": 22.654,
|
||||
"offset": 0.231,
|
||||
"jitter": 1.964,
|
||||
"state": "*"
|
||||
}
|
||||
]
|
||||
|
||||
$ ntpq -pn| jc --ntpq -p -r
|
||||
[
|
||||
{
|
||||
"s": "+",
|
||||
"remote": "44.190.6.254",
|
||||
"refid": "127.67.113.92",
|
||||
"st": "2",
|
||||
"t": "u",
|
||||
"when": "66",
|
||||
"poll": "64",
|
||||
"reach": "377",
|
||||
"delay": "22.690",
|
||||
"offset": "-0.392",
|
||||
"jitter": "2.085"
|
||||
},
|
||||
{
|
||||
"s": "-",
|
||||
"remote": "108.59.2.24",
|
||||
"refid": "130.133.1.10",
|
||||
"st": "2",
|
||||
"t": "u",
|
||||
"when": "63",
|
||||
"poll": "64",
|
||||
"reach": "377",
|
||||
"delay": "90.805",
|
||||
"offset": "2.840",
|
||||
"jitter": "1.908"
|
||||
},
|
||||
{
|
||||
"s": "+",
|
||||
"remote": "38.229.71.1",
|
||||
"refid": "204.9.54.119",
|
||||
"st": "2",
|
||||
"t": "u",
|
||||
"when": "64",
|
||||
"poll": "64",
|
||||
"reach": "377",
|
||||
"delay": "68.699",
|
||||
"offset": "-0.610",
|
||||
"jitter": "2.576"
|
||||
},
|
||||
{
|
||||
"s": "*",
|
||||
"remote": "72.5.72.15",
|
||||
"refid": "216.218.254.202",
|
||||
"st": "2",
|
||||
"t": "u",
|
||||
"when": "63",
|
||||
"poll": "64",
|
||||
"reach": "377",
|
||||
"delay": "22.654",
|
||||
"offset": "0.231",
|
||||
"jitter": "1.964"
|
||||
}
|
||||
]
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"state": string, # space/~ converted to null
|
||||
"remote": string,
|
||||
"refid": string,
|
||||
"st": integer,
|
||||
"t": string,
|
||||
"when": integer, # - converted to null
|
||||
"poll": integer,
|
||||
"reach": integer,
|
||||
"delay": float,
|
||||
"offset": float,
|
||||
"jitter": float
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
126
docs/parsers/passwd.md
Normal file
126
docs/parsers/passwd.md
Normal file
@@ -0,0 +1,126 @@
|
||||
# jc.parsers.passwd
|
||||
jc - JSON CLI output utility /etc/passwd file Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --passwd as the first argument if the piped input is coming from /etc/passwd
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /etc/passwd | jc --passwd -p
|
||||
[
|
||||
{
|
||||
"username": "nobody",
|
||||
"password": "*",
|
||||
"uid": -2,
|
||||
"gid": -2,
|
||||
"comment": "Unprivileged User",
|
||||
"home": "/var/empty",
|
||||
"shell": "/usr/bin/false"
|
||||
},
|
||||
{
|
||||
"username": "root",
|
||||
"password": "*",
|
||||
"uid": 0,
|
||||
"gid": 0,
|
||||
"comment": "System Administrator",
|
||||
"home": "/var/root",
|
||||
"shell": "/bin/sh"
|
||||
},
|
||||
{
|
||||
"username": "daemon",
|
||||
"password": "*",
|
||||
"uid": 1,
|
||||
"gid": 1,
|
||||
"comment": "System Services",
|
||||
"home": "/var/root",
|
||||
"shell": "/usr/bin/false"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ cat /etc/passwd | jc --passwd -p -r
|
||||
[
|
||||
{
|
||||
"username": "nobody",
|
||||
"password": "*",
|
||||
"uid": "-2",
|
||||
"gid": "-2",
|
||||
"comment": "Unprivileged User",
|
||||
"home": "/var/empty",
|
||||
"shell": "/usr/bin/false"
|
||||
},
|
||||
{
|
||||
"username": "root",
|
||||
"password": "*",
|
||||
"uid": "0",
|
||||
"gid": "0",
|
||||
"comment": "System Administrator",
|
||||
"home": "/var/root",
|
||||
"shell": "/bin/sh"
|
||||
},
|
||||
{
|
||||
"username": "daemon",
|
||||
"password": "*",
|
||||
"uid": "1",
|
||||
"gid": "1",
|
||||
"comment": "System Services",
|
||||
"home": "/var/root",
|
||||
"shell": "/usr/bin/false"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"username": string,
|
||||
"password": string,
|
||||
"uid": integer,
|
||||
"gid": integer,
|
||||
"comment": string,
|
||||
"home": string,
|
||||
"shell": string
|
||||
}
|
||||
]
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
171
docs/parsers/ping.md
Normal file
171
docs/parsers/ping.md
Normal file
@@ -0,0 +1,171 @@
|
||||
# jc.parsers.ping
|
||||
jc - JSON CLI output utility ping Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --ping as the first argument if the piped input is coming from ping
|
||||
|
||||
Note: Use the ping -c (count) option, otherwise data will not be piped to jc.
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ ping -c 3 -p ff cnn.com | jc --ping -p
|
||||
{
|
||||
"destination_ip": "151.101.1.67",
|
||||
"data_bytes": 56,
|
||||
"pattern": "0xff",
|
||||
"destination": "cnn.com",
|
||||
"packets_transmitted": 3,
|
||||
"packets_received": 3,
|
||||
"packet_loss_percent": 0.0,
|
||||
"duplicates": 0,
|
||||
"round_trip_ms_min": 28.015,
|
||||
"round_trip_ms_avg": 32.848,
|
||||
"round_trip_ms_max": 39.376,
|
||||
"round_trip_ms_stddev": 4.79,
|
||||
"responses": [
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": 64,
|
||||
"response_ip": "151.101.1.67",
|
||||
"icmp_seq": 0,
|
||||
"ttl": 59,
|
||||
"time_ms": 28.015,
|
||||
"duplicate": false
|
||||
},
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": 64,
|
||||
"response_ip": "151.101.1.67",
|
||||
"icmp_seq": 1,
|
||||
"ttl": 59,
|
||||
"time_ms": 39.376,
|
||||
"duplicate": false
|
||||
},
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": 64,
|
||||
"response_ip": "151.101.1.67",
|
||||
"icmp_seq": 2,
|
||||
"ttl": 59,
|
||||
"time_ms": 31.153,
|
||||
"duplicate": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
$ ping -c 3 -p ff cnn.com | jc --ping -p -r
|
||||
{
|
||||
"destination_ip": "151.101.129.67",
|
||||
"data_bytes": "56",
|
||||
"pattern": "0xff",
|
||||
"destination": "cnn.com",
|
||||
"packets_transmitted": "3",
|
||||
"packets_received": "3",
|
||||
"packet_loss_percent": "0.0",
|
||||
"duplicates": "0",
|
||||
"round_trip_ms_min": "25.078",
|
||||
"round_trip_ms_avg": "29.543",
|
||||
"round_trip_ms_max": "32.553",
|
||||
"round_trip_ms_stddev": "3.221",
|
||||
"responses": [
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": "64",
|
||||
"response_ip": "151.101.129.67",
|
||||
"icmp_seq": "0",
|
||||
"ttl": "59",
|
||||
"time_ms": "25.078",
|
||||
"duplicate": false
|
||||
},
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": "64",
|
||||
"response_ip": "151.101.129.67",
|
||||
"icmp_seq": "1",
|
||||
"ttl": "59",
|
||||
"time_ms": "30.999",
|
||||
"duplicate": false
|
||||
},
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": "64",
|
||||
"response_ip": "151.101.129.67",
|
||||
"icmp_seq": "2",
|
||||
"ttl": "59",
|
||||
"time_ms": "32.553",
|
||||
"duplicate": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured data with the following schema:
|
||||
|
||||
{
|
||||
"source_ip": string,
|
||||
"destination_ip": string,
|
||||
"data_bytes": integer,
|
||||
"pattern": string, (null if not set)
|
||||
"destination": string,
|
||||
"packets_transmitted": integer,
|
||||
"packets_received": integer,
|
||||
"packet_loss_percent": float,
|
||||
"duplicates": integer,
|
||||
"round_trip_ms_min": float,
|
||||
"round_trip_ms_avg": float,
|
||||
"round_trip_ms_max": float,
|
||||
"round_trip_ms_stddev": float,
|
||||
"responses": [
|
||||
{
|
||||
"type": string, ('reply' or 'timeout')
|
||||
"timestamp": float,
|
||||
"bytes": integer,
|
||||
"response_ip": string,
|
||||
"icmp_seq": integer,
|
||||
"ttl": integer,
|
||||
"time_ms": float,
|
||||
"duplicate": boolean
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
@@ -15,53 +15,48 @@ Examples:
|
||||
[
|
||||
{
|
||||
"destination": "default",
|
||||
"gateway": "gateway",
|
||||
"gateway": "_gateway",
|
||||
"genmask": "0.0.0.0",
|
||||
"flags": "UG",
|
||||
"metric": 100,
|
||||
"metric": 202,
|
||||
"ref": 0,
|
||||
"use": 0,
|
||||
"iface": "ens33",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0
|
||||
},
|
||||
{
|
||||
"destination": "172.17.0.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.0.0",
|
||||
"flags": "U",
|
||||
"metric": 0,
|
||||
"ref": 0,
|
||||
"use": 0,
|
||||
"iface": "docker",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0
|
||||
"irtt": 0,
|
||||
"flags_pretty": [
|
||||
"UP",
|
||||
"GATEWAY"
|
||||
]
|
||||
},
|
||||
{
|
||||
"destination": "192.168.71.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.255.0",
|
||||
"flags": "U",
|
||||
"metric": 100,
|
||||
"metric": 202,
|
||||
"ref": 0,
|
||||
"use": 0,
|
||||
"iface": "ens33",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0
|
||||
"irtt": 0,
|
||||
"flags_pretty": [
|
||||
"UP"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
$ route -ee | jc --route -p -r
|
||||
[
|
||||
{
|
||||
"destination": "default",
|
||||
"gateway": "gateway",
|
||||
"gateway": "_gateway",
|
||||
"genmask": "0.0.0.0",
|
||||
"flags": "UG",
|
||||
"metric": "100",
|
||||
"metric": "202",
|
||||
"ref": "0",
|
||||
"use": "0",
|
||||
"iface": "ens33",
|
||||
@@ -69,25 +64,12 @@ Examples:
|
||||
"window": "0",
|
||||
"irtt": "0"
|
||||
},
|
||||
{
|
||||
"destination": "172.17.0.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.0.0",
|
||||
"flags": "U",
|
||||
"metric": "0",
|
||||
"ref": "0",
|
||||
"use": "0",
|
||||
"iface": "docker",
|
||||
"mss": "0",
|
||||
"window": "0",
|
||||
"irtt": "0"
|
||||
},
|
||||
{
|
||||
"destination": "192.168.71.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.255.0",
|
||||
"flags": "U",
|
||||
"metric": "100",
|
||||
"metric": "202",
|
||||
"ref": "0",
|
||||
"use": "0",
|
||||
"iface": "ens33",
|
||||
@@ -97,6 +79,7 @@ Examples:
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
@@ -119,17 +102,20 @@ Returns:
|
||||
|
||||
[
|
||||
{
|
||||
"destination": string,
|
||||
"gateway": string,
|
||||
"genmask": string,
|
||||
"flags": string,
|
||||
"metric": integer,
|
||||
"ref": integer,
|
||||
"use": integer,
|
||||
"mss": integer,
|
||||
"window": integer,
|
||||
"irtt": integer,
|
||||
"iface": string
|
||||
"destination": string,
|
||||
"gateway": string,
|
||||
"genmask": string,
|
||||
"flags": string,
|
||||
"flags_pretty": [
|
||||
string,
|
||||
]
|
||||
"metric": integer,
|
||||
"ref": integer,
|
||||
"use": integer,
|
||||
"mss": integer,
|
||||
"window": integer,
|
||||
"irtt": integer,
|
||||
"iface": string
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
133
docs/parsers/shadow.md
Normal file
133
docs/parsers/shadow.md
Normal file
@@ -0,0 +1,133 @@
|
||||
# jc.parsers.shadow
|
||||
jc - JSON CLI output utility /etc/shadow file Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --shadow as the first argument if the piped input is coming from /etc/shadow
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ sudo cat /etc/shadow | jc --shadow -p
|
||||
[
|
||||
{
|
||||
"username": "root",
|
||||
"password": "*",
|
||||
"last_changed": 18113,
|
||||
"minimum": 0,
|
||||
"maximum": 99999,
|
||||
"warn": 7,
|
||||
"inactive": null,
|
||||
"expire": null
|
||||
},
|
||||
{
|
||||
"username": "daemon",
|
||||
"password": "*",
|
||||
"last_changed": 18113,
|
||||
"minimum": 0,
|
||||
"maximum": 99999,
|
||||
"warn": 7,
|
||||
"inactive": null,
|
||||
"expire": null
|
||||
},
|
||||
{
|
||||
"username": "bin",
|
||||
"password": "*",
|
||||
"last_changed": 18113,
|
||||
"minimum": 0,
|
||||
"maximum": 99999,
|
||||
"warn": 7,
|
||||
"inactive": null,
|
||||
"expire": null
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ sudo cat /etc/shadow | jc --shadow -p -r
|
||||
[
|
||||
{
|
||||
"username": "root",
|
||||
"password": "*",
|
||||
"last_changed": "18113",
|
||||
"minimum": "0",
|
||||
"maximum": "99999",
|
||||
"warn": "7",
|
||||
"inactive": "",
|
||||
"expire": ""
|
||||
},
|
||||
{
|
||||
"username": "daemon",
|
||||
"password": "*",
|
||||
"last_changed": "18113",
|
||||
"minimum": "0",
|
||||
"maximum": "99999",
|
||||
"warn": "7",
|
||||
"inactive": "",
|
||||
"expire": ""
|
||||
},
|
||||
{
|
||||
"username": "bin",
|
||||
"password": "*",
|
||||
"last_changed": "18113",
|
||||
"minimum": "0",
|
||||
"maximum": "99999",
|
||||
"warn": "7",
|
||||
"inactive": "",
|
||||
"expire": ""
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"username": string,
|
||||
"password": string,
|
||||
"last_changed": integer,
|
||||
"minimum": integer,
|
||||
"maximum": integer,
|
||||
"warn": integer,
|
||||
"inactive": integer,
|
||||
"expire": integer
|
||||
}
|
||||
]
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
@@ -7,7 +7,7 @@ Usage:
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
@@ -141,7 +141,11 @@ Returns:
|
||||
"access_time": string, # - = null
|
||||
"modify_time": string, # - = null
|
||||
"change_time": string, # - = null
|
||||
"birth_time": string # - = null
|
||||
"birth_time": string, # - = null
|
||||
"unix_device": integer,
|
||||
"rdev": integer,
|
||||
"block_size": integer,
|
||||
"unix_flags": string
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
84
docs/parsers/sysctl.md
Normal file
84
docs/parsers/sysctl.md
Normal file
@@ -0,0 +1,84 @@
|
||||
# jc.parsers.sysctl
|
||||
jc - JSON CLI output utility sysctl -a Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --sysctl as the first argument if the piped input is coming from sysctl -a
|
||||
|
||||
Note: since sysctl output is not easily parsable only a very simple key/value object
|
||||
will be output. An attempt is made to convert obvious integers and floats. If no
|
||||
conversion is desired, use the -r (raw) option.
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ sysctl | jc --sysctl -p
|
||||
{
|
||||
"user.cs_path": "/usr/bin:/bin:/usr/sbin:/sbin",
|
||||
"user.bc_base_max": 99,
|
||||
"user.bc_dim_max": 2048,
|
||||
"user.bc_scale_max": 99,
|
||||
"user.bc_string_max": 1000,
|
||||
"user.coll_weights_max": 2,
|
||||
"user.expr_nest_max": 32
|
||||
...
|
||||
}
|
||||
|
||||
$ sysctl | jc --sysctl -p -r
|
||||
{
|
||||
"user.cs_path": "/usr/bin:/bin:/usr/sbin:/sbin",
|
||||
"user.bc_base_max": "99",
|
||||
"user.bc_dim_max": "2048",
|
||||
"user.bc_scale_max": "99",
|
||||
"user.bc_string_max": "1000",
|
||||
"user.coll_weights_max": "2",
|
||||
"user.expr_nest_max": "32",
|
||||
...
|
||||
}
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured data with the following schema:
|
||||
|
||||
{
|
||||
"foo": string/integer/float, # best guess based on value
|
||||
"bar": string/integer/float,
|
||||
"baz": string/integer/float
|
||||
}
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
87
docs/parsers/timedatectl.md
Normal file
87
docs/parsers/timedatectl.md
Normal file
@@ -0,0 +1,87 @@
|
||||
# jc.parsers.timedatectl
|
||||
jc - JSON CLI output utility timedatectl Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --timedatectl as the first argument if the piped input is coming from timedatectl or timedatectl status
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
|
||||
Examples:
|
||||
|
||||
$ timedatectl | jc --timedatectl -p
|
||||
{
|
||||
"local_time": "Tue 2020-03-10 17:53:21 PDT",
|
||||
"universal_time": "Wed 2020-03-11 00:53:21 UTC",
|
||||
"rtc_time": "Wed 2020-03-11 00:53:21",
|
||||
"time_zone": "America/Los_Angeles (PDT, -0700)",
|
||||
"ntp_enabled": true,
|
||||
"ntp_synchronized": true,
|
||||
"rtc_in_local_tz": false,
|
||||
"dst_active": true
|
||||
}
|
||||
|
||||
$ timedatectl | jc --timedatectl -p -r
|
||||
{
|
||||
"local_time": "Tue 2020-03-10 17:53:21 PDT",
|
||||
"universal_time": "Wed 2020-03-11 00:53:21 UTC",
|
||||
"rtc_time": "Wed 2020-03-11 00:53:21",
|
||||
"time_zone": "America/Los_Angeles (PDT, -0700)",
|
||||
"ntp_enabled": "yes",
|
||||
"ntp_synchronized": "yes",
|
||||
"rtc_in_local_tz": "no",
|
||||
"dst_active": "yes"
|
||||
}
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured data with the following schema:
|
||||
|
||||
{
|
||||
"local_time": string,
|
||||
"universal_time": string,
|
||||
"rtc_time": string,
|
||||
"time_zone": string,
|
||||
"ntp_enabled": boolean,
|
||||
"ntp_synchronized": boolean,
|
||||
"system_clock_synchronized": boolean,
|
||||
"systemd-timesyncd.service_active": boolean,
|
||||
"rtc_in_local_tz": boolean,
|
||||
"dst_active": boolean
|
||||
}
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
158
docs/parsers/tracepath.md
Normal file
158
docs/parsers/tracepath.md
Normal file
@@ -0,0 +1,158 @@
|
||||
# jc.parsers.tracepath
|
||||
jc - JSON CLI output utility tracepath Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --tracepath as the first argument if the piped input is coming from tracepath
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
|
||||
Examples:
|
||||
|
||||
$ tracepath6 3ffe:2400:0:109::2 | jc --tracepath -p
|
||||
{
|
||||
"pmtu": 1480,
|
||||
"forward_hops": 2,
|
||||
"return_hops": 2,
|
||||
"hops": [
|
||||
{
|
||||
"ttl": 1,
|
||||
"guess": true,
|
||||
"host": "[LOCALHOST]",
|
||||
"reply_ms": null,
|
||||
"pmtu": 1500,
|
||||
"asymmetric_difference": null,
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": 1,
|
||||
"guess": false,
|
||||
"host": "dust.inr.ac.ru",
|
||||
"reply_ms": 0.411,
|
||||
"pmtu": null,
|
||||
"asymmetric_difference": null,
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": 2,
|
||||
"guess": false,
|
||||
"host": "dust.inr.ac.ru",
|
||||
"reply_ms": 0.39,
|
||||
"pmtu": 1480,
|
||||
"asymmetric_difference": 1,
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": 2,
|
||||
"guess": false,
|
||||
"host": "3ffe:2400:0:109::2",
|
||||
"reply_ms": 463.514,
|
||||
"pmtu": null,
|
||||
"asymmetric_difference": null,
|
||||
"reached": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
$ tracepath6 3ffe:2400:0:109::2 | jc --tracepath -p -r
|
||||
{
|
||||
"pmtu": "1480",
|
||||
"forward_hops": "2",
|
||||
"return_hops": "2",
|
||||
"hops": [
|
||||
{
|
||||
"ttl": "1",
|
||||
"guess": true,
|
||||
"host": "[LOCALHOST]",
|
||||
"reply_ms": null,
|
||||
"pmtu": "1500",
|
||||
"asymmetric_difference": null,
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": "1",
|
||||
"guess": false,
|
||||
"host": "dust.inr.ac.ru",
|
||||
"reply_ms": "0.411",
|
||||
"pmtu": null,
|
||||
"asymmetric_difference": null,
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": "2",
|
||||
"guess": false,
|
||||
"host": "dust.inr.ac.ru",
|
||||
"reply_ms": "0.390",
|
||||
"pmtu": "1480",
|
||||
"asymmetric_difference": "1",
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": "2",
|
||||
"guess": false,
|
||||
"host": "3ffe:2400:0:109::2",
|
||||
"reply_ms": "463.514",
|
||||
"pmtu": null,
|
||||
"asymmetric_difference": null,
|
||||
"reached": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured data with the following schema:
|
||||
|
||||
{
|
||||
"pmtu": integer,
|
||||
"forward_hops": integer,
|
||||
"return_hops": integer,
|
||||
"hops": [
|
||||
{
|
||||
"ttl": integer,
|
||||
"guess": boolean,
|
||||
"host": string,
|
||||
"reply_ms": float,
|
||||
"pmtu": integer,
|
||||
"asymmetric_difference": integer,
|
||||
"reached": boolean
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
147
docs/parsers/traceroute.md
Normal file
147
docs/parsers/traceroute.md
Normal file
@@ -0,0 +1,147 @@
|
||||
# jc.parsers.traceroute
|
||||
jc - JSON CLI output utility traceroute Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --traceroute as the first argument if the piped input is coming from traceroute
|
||||
|
||||
Note: on OSX and FreeBSD be sure to redirect STDERR to STDOUT since the header line is sent to STDERR
|
||||
e.g. $ traceroute 8.8.8.8 2>&1 | jc --traceroute
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ traceroute google.com | jc --traceroute -p
|
||||
{
|
||||
"destination_ip": "216.58.194.46",
|
||||
"destination_name": "google.com",
|
||||
"hops": [
|
||||
{
|
||||
"hop": 1,
|
||||
"probes": [
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": 198.574
|
||||
},
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": null
|
||||
},
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": 198.65
|
||||
}
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
}
|
||||
|
||||
$ traceroute google.com | jc --traceroute -p -r
|
||||
{
|
||||
"destination_ip": "216.58.194.46",
|
||||
"destination_name": "google.com",
|
||||
"hops": [
|
||||
{
|
||||
"hop": "1",
|
||||
"probes": [
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": "198.574"
|
||||
},
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": null
|
||||
},
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": "198.650"
|
||||
}
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
}
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## Hop
|
||||
```python
|
||||
Hop(self, idx)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured data with the following schema:
|
||||
|
||||
{
|
||||
"destination_ip": string,
|
||||
"destination_name": string,
|
||||
"hops": [
|
||||
{
|
||||
"hop": integer,
|
||||
"probes": [
|
||||
{
|
||||
"annotation": string,
|
||||
"asn": integer,
|
||||
"ip": string,
|
||||
"name": string,
|
||||
"rtt": float
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
@@ -11,7 +11,7 @@ Limitations:
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin'
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Example:
|
||||
|
||||
|
||||
152
docs/parsers/who.md
Normal file
152
docs/parsers/who.md
Normal file
@@ -0,0 +1,152 @@
|
||||
# jc.parsers.who
|
||||
jc - JSON CLI output utility who Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --who as the first argument if the piped input is coming from who
|
||||
|
||||
accepts any of the following who options (or no options): -aTH
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'cygwin', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ who -a | jc --who -p
|
||||
[
|
||||
{
|
||||
"event": "reboot",
|
||||
"time": "Feb 7 23:31",
|
||||
"pid": 1
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "-",
|
||||
"tty": "console",
|
||||
"time": "Feb 7 23:32",
|
||||
"idle": "old",
|
||||
"pid": 105
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "+",
|
||||
"tty": "ttys000",
|
||||
"time": "Feb 13 16:44",
|
||||
"idle": ".",
|
||||
"pid": 51217,
|
||||
"comment": "term=0 exit=0"
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "?",
|
||||
"tty": "ttys003",
|
||||
"time": "Feb 28 08:59",
|
||||
"idle": "01:36",
|
||||
"pid": 41402
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "+",
|
||||
"tty": "ttys004",
|
||||
"time": "Mar 1 16:35",
|
||||
"idle": ".",
|
||||
"pid": 15679,
|
||||
"from": "192.168.1.5"
|
||||
}
|
||||
]
|
||||
|
||||
$ who -a | jc --who -p -r
|
||||
[
|
||||
{
|
||||
"event": "reboot",
|
||||
"time": "Feb 7 23:31",
|
||||
"pid": "1"
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "-",
|
||||
"tty": "console",
|
||||
"time": "Feb 7 23:32",
|
||||
"idle": "old",
|
||||
"pid": "105"
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "+",
|
||||
"tty": "ttys000",
|
||||
"time": "Feb 13 16:44",
|
||||
"idle": ".",
|
||||
"pid": "51217",
|
||||
"comment": "term=0 exit=0"
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "?",
|
||||
"tty": "ttys003",
|
||||
"time": "Feb 28 08:59",
|
||||
"idle": "01:36",
|
||||
"pid": "41402"
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "+",
|
||||
"tty": "ttys004",
|
||||
"time": "Mar 1 16:35",
|
||||
"idle": ".",
|
||||
"pid": "15679",
|
||||
"from": "192.168.1.5"
|
||||
}
|
||||
]
|
||||
|
||||
## info
|
||||
```python
|
||||
info(self, /, *args, **kwargs)
|
||||
```
|
||||
|
||||
## process
|
||||
```python
|
||||
process(proc_data)
|
||||
```
|
||||
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"user": string,
|
||||
"event": string,
|
||||
"writeable_tty": string,
|
||||
"tty": string,
|
||||
"time": string,
|
||||
"idle": string,
|
||||
"pid": integer,
|
||||
"from": string,
|
||||
"comment": string
|
||||
}
|
||||
]
|
||||
|
||||
## parse
|
||||
```python
|
||||
parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
|
||||
@@ -48,3 +48,18 @@ Returns:
|
||||
|
||||
no return, just prints output to STDERR
|
||||
|
||||
## has_data
|
||||
```python
|
||||
has_data(data)
|
||||
```
|
||||
|
||||
Checks if the input contains data. If there are any non-whitespace characters then return True, else return False
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) input to check whether it contains data
|
||||
|
||||
Returns:
|
||||
|
||||
Boolean True if input string (data) contains non-whitespace characters, otherwise False
|
||||
|
||||
|
||||
611
jc/appdirs.py
Normal file
611
jc/appdirs.py
Normal file
@@ -0,0 +1,611 @@
|
||||
# -*- coding: utf-8 -*-
|
||||
# Copyright (c) 2005-2010 ActiveState Software Inc.
|
||||
# Copyright (c) 2013 Eddy Petrișor
|
||||
|
||||
'''
|
||||
# This is the MIT license
|
||||
|
||||
Copyright (c) 2010 ActiveState Software Inc.
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a
|
||||
copy of this software and associated documentation files (the
|
||||
"Software"), to deal in the Software without restriction, including
|
||||
without limitation the rights to use, copy, modify, merge, publish,
|
||||
distribute, sublicense, and/or sell copies of the Software, and to
|
||||
permit persons to whom the Software is furnished to do so, subject to
|
||||
the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included
|
||||
in all copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
|
||||
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
|
||||
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
|
||||
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
|
||||
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
|
||||
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
'''
|
||||
|
||||
"""Utilities for determining application-specific dirs.
|
||||
|
||||
See <https://github.com/ActiveState/appdirs> for details and usage.
|
||||
"""
|
||||
# Dev Notes:
|
||||
# - MSDN on where to store app data files:
|
||||
# http://support.microsoft.com/default.aspx?scid=kb;en-us;310294#XSLTH3194121123120121120120
|
||||
# - Mac OS X: http://developer.apple.com/documentation/MacOSX/Conceptual/BPFileSystem/index.html
|
||||
# - XDG spec for Un*x: https://standards.freedesktop.org/basedir-spec/basedir-spec-latest.html
|
||||
|
||||
__version__ = "1.4.4"
|
||||
__version_info__ = tuple(int(segment) for segment in __version__.split("."))
|
||||
|
||||
|
||||
import sys
|
||||
import os
|
||||
|
||||
PY3 = sys.version_info[0] == 3
|
||||
|
||||
if PY3:
|
||||
unicode = str
|
||||
|
||||
if sys.platform.startswith('java'):
|
||||
import platform
|
||||
os_name = platform.java_ver()[3][0]
|
||||
if os_name.startswith('Windows'): # "Windows XP", "Windows 7", etc.
|
||||
system = 'win32'
|
||||
elif os_name.startswith('Mac'): # "Mac OS X", etc.
|
||||
system = 'darwin'
|
||||
else: # "Linux", "SunOS", "FreeBSD", etc.
|
||||
# Setting this to "linux2" is not ideal, but only Windows or Mac
|
||||
# are actually checked for and the rest of the module expects
|
||||
# *sys.platform* style strings.
|
||||
system = 'linux2'
|
||||
else:
|
||||
system = sys.platform
|
||||
|
||||
|
||||
|
||||
def user_data_dir(appname=None, appauthor=None, version=None, roaming=False):
|
||||
r"""Return full path to the user-specific data dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname. You may
|
||||
pass False to disable it.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"roaming" (boolean, default False) can be set True to use the Windows
|
||||
roaming appdata directory. That means that for users on a Windows
|
||||
network setup for roaming profiles, this user data will be
|
||||
sync'd on login. See
|
||||
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
|
||||
for a discussion of issues.
|
||||
|
||||
Typical user data directories are:
|
||||
Mac OS X: ~/Library/Application Support/<AppName>
|
||||
Unix: ~/.local/share/<AppName> # or in $XDG_DATA_HOME, if defined
|
||||
Win XP (not roaming): C:\Documents and Settings\<username>\Application Data\<AppAuthor>\<AppName>
|
||||
Win XP (roaming): C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>
|
||||
Win 7 (not roaming): C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>
|
||||
Win 7 (roaming): C:\Users\<username>\AppData\Roaming\<AppAuthor>\<AppName>
|
||||
|
||||
For Unix, we follow the XDG spec and support $XDG_DATA_HOME.
|
||||
That means, by default "~/.local/share/<AppName>".
|
||||
"""
|
||||
if system == "win32":
|
||||
if appauthor is None:
|
||||
appauthor = appname
|
||||
const = roaming and "CSIDL_APPDATA" or "CSIDL_LOCAL_APPDATA"
|
||||
path = os.path.normpath(_get_win_folder(const))
|
||||
if appname:
|
||||
if appauthor is not False:
|
||||
path = os.path.join(path, appauthor, appname)
|
||||
else:
|
||||
path = os.path.join(path, appname)
|
||||
elif system == 'darwin':
|
||||
path = os.path.expanduser('~/Library/Application Support/')
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
else:
|
||||
path = os.getenv('XDG_DATA_HOME', os.path.expanduser("~/.local/share"))
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
return path
|
||||
|
||||
|
||||
def site_data_dir(appname=None, appauthor=None, version=None, multipath=False):
|
||||
r"""Return full path to the user-shared data dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname. You may
|
||||
pass False to disable it.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"multipath" is an optional parameter only applicable to *nix
|
||||
which indicates that the entire list of data dirs should be
|
||||
returned. By default, the first item from XDG_DATA_DIRS is
|
||||
returned, or '/usr/local/share/<AppName>',
|
||||
if XDG_DATA_DIRS is not set
|
||||
|
||||
Typical site data directories are:
|
||||
Mac OS X: /Library/Application Support/<AppName>
|
||||
Unix: /usr/local/share/<AppName> or /usr/share/<AppName>
|
||||
Win XP: C:\Documents and Settings\All Users\Application Data\<AppAuthor>\<AppName>
|
||||
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
|
||||
Win 7: C:\ProgramData\<AppAuthor>\<AppName> # Hidden, but writeable on Win 7.
|
||||
|
||||
For Unix, this is using the $XDG_DATA_DIRS[0] default.
|
||||
|
||||
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
|
||||
"""
|
||||
if system == "win32":
|
||||
if appauthor is None:
|
||||
appauthor = appname
|
||||
path = os.path.normpath(_get_win_folder("CSIDL_COMMON_APPDATA"))
|
||||
if appname:
|
||||
if appauthor is not False:
|
||||
path = os.path.join(path, appauthor, appname)
|
||||
else:
|
||||
path = os.path.join(path, appname)
|
||||
elif system == 'darwin':
|
||||
path = os.path.expanduser('/Library/Application Support')
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
else:
|
||||
# XDG default for $XDG_DATA_DIRS
|
||||
# only first, if multipath is False
|
||||
path = os.getenv('XDG_DATA_DIRS',
|
||||
os.pathsep.join(['/usr/local/share', '/usr/share']))
|
||||
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
|
||||
if appname:
|
||||
if version:
|
||||
appname = os.path.join(appname, version)
|
||||
pathlist = [os.sep.join([x, appname]) for x in pathlist]
|
||||
|
||||
if multipath:
|
||||
path = os.pathsep.join(pathlist)
|
||||
else:
|
||||
path = pathlist[0]
|
||||
return path
|
||||
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
return path
|
||||
|
||||
|
||||
def user_config_dir(appname=None, appauthor=None, version=None, roaming=False):
|
||||
r"""Return full path to the user-specific config dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname. You may
|
||||
pass False to disable it.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"roaming" (boolean, default False) can be set True to use the Windows
|
||||
roaming appdata directory. That means that for users on a Windows
|
||||
network setup for roaming profiles, this user data will be
|
||||
sync'd on login. See
|
||||
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
|
||||
for a discussion of issues.
|
||||
|
||||
Typical user config directories are:
|
||||
Mac OS X: ~/Library/Preferences/<AppName>
|
||||
Unix: ~/.config/<AppName> # or in $XDG_CONFIG_HOME, if defined
|
||||
Win *: same as user_data_dir
|
||||
|
||||
For Unix, we follow the XDG spec and support $XDG_CONFIG_HOME.
|
||||
That means, by default "~/.config/<AppName>".
|
||||
"""
|
||||
if system == "win32":
|
||||
path = user_data_dir(appname, appauthor, None, roaming)
|
||||
elif system == 'darwin':
|
||||
path = os.path.expanduser('~/Library/Preferences/')
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
else:
|
||||
path = os.getenv('XDG_CONFIG_HOME', os.path.expanduser("~/.config"))
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
return path
|
||||
|
||||
|
||||
def site_config_dir(appname=None, appauthor=None, version=None, multipath=False):
|
||||
r"""Return full path to the user-shared data dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname. You may
|
||||
pass False to disable it.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"multipath" is an optional parameter only applicable to *nix
|
||||
which indicates that the entire list of config dirs should be
|
||||
returned. By default, the first item from XDG_CONFIG_DIRS is
|
||||
returned, or '/etc/xdg/<AppName>', if XDG_CONFIG_DIRS is not set
|
||||
|
||||
Typical site config directories are:
|
||||
Mac OS X: same as site_data_dir
|
||||
Unix: /etc/xdg/<AppName> or $XDG_CONFIG_DIRS[i]/<AppName> for each value in
|
||||
$XDG_CONFIG_DIRS
|
||||
Win *: same as site_data_dir
|
||||
Vista: (Fail! "C:\ProgramData" is a hidden *system* directory on Vista.)
|
||||
|
||||
For Unix, this is using the $XDG_CONFIG_DIRS[0] default, if multipath=False
|
||||
|
||||
WARNING: Do not use this on Windows. See the Vista-Fail note above for why.
|
||||
"""
|
||||
if system == 'win32':
|
||||
path = site_data_dir(appname, appauthor)
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
elif system == 'darwin':
|
||||
path = os.path.expanduser('/Library/Preferences')
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
else:
|
||||
# XDG default for $XDG_CONFIG_DIRS
|
||||
# only first, if multipath is False
|
||||
path = os.getenv('XDG_CONFIG_DIRS', '/etc/xdg')
|
||||
pathlist = [os.path.expanduser(x.rstrip(os.sep)) for x in path.split(os.pathsep)]
|
||||
if appname:
|
||||
if version:
|
||||
appname = os.path.join(appname, version)
|
||||
pathlist = [os.sep.join([x, appname]) for x in pathlist]
|
||||
|
||||
if multipath:
|
||||
path = os.pathsep.join(pathlist)
|
||||
else:
|
||||
path = pathlist[0]
|
||||
return path
|
||||
|
||||
|
||||
def user_cache_dir(appname=None, appauthor=None, version=None, opinion=True):
|
||||
r"""Return full path to the user-specific cache dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname. You may
|
||||
pass False to disable it.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"opinion" (boolean) can be False to disable the appending of
|
||||
"Cache" to the base app data dir for Windows. See
|
||||
discussion below.
|
||||
|
||||
Typical user cache directories are:
|
||||
Mac OS X: ~/Library/Caches/<AppName>
|
||||
Unix: ~/.cache/<AppName> (XDG default)
|
||||
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Cache
|
||||
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Cache
|
||||
|
||||
On Windows the only suggestion in the MSDN docs is that local settings go in
|
||||
the `CSIDL_LOCAL_APPDATA` directory. This is identical to the non-roaming
|
||||
app data dir (the default returned by `user_data_dir` above). Apps typically
|
||||
put cache data somewhere *under* the given dir here. Some examples:
|
||||
...\Mozilla\Firefox\Profiles\<ProfileName>\Cache
|
||||
...\Acme\SuperApp\Cache\1.0
|
||||
OPINION: This function appends "Cache" to the `CSIDL_LOCAL_APPDATA` value.
|
||||
This can be disabled with the `opinion=False` option.
|
||||
"""
|
||||
if system == "win32":
|
||||
if appauthor is None:
|
||||
appauthor = appname
|
||||
path = os.path.normpath(_get_win_folder("CSIDL_LOCAL_APPDATA"))
|
||||
if appname:
|
||||
if appauthor is not False:
|
||||
path = os.path.join(path, appauthor, appname)
|
||||
else:
|
||||
path = os.path.join(path, appname)
|
||||
if opinion:
|
||||
path = os.path.join(path, "Cache")
|
||||
elif system == 'darwin':
|
||||
path = os.path.expanduser('~/Library/Caches')
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
else:
|
||||
path = os.getenv('XDG_CACHE_HOME', os.path.expanduser('~/.cache'))
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
return path
|
||||
|
||||
|
||||
def user_state_dir(appname=None, appauthor=None, version=None, roaming=False):
|
||||
r"""Return full path to the user-specific state dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname. You may
|
||||
pass False to disable it.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"roaming" (boolean, default False) can be set True to use the Windows
|
||||
roaming appdata directory. That means that for users on a Windows
|
||||
network setup for roaming profiles, this user data will be
|
||||
sync'd on login. See
|
||||
<http://technet.microsoft.com/en-us/library/cc766489(WS.10).aspx>
|
||||
for a discussion of issues.
|
||||
|
||||
Typical user state directories are:
|
||||
Mac OS X: same as user_data_dir
|
||||
Unix: ~/.local/state/<AppName> # or in $XDG_STATE_HOME, if defined
|
||||
Win *: same as user_data_dir
|
||||
|
||||
For Unix, we follow this Debian proposal <https://wiki.debian.org/XDGBaseDirectorySpecification#state>
|
||||
to extend the XDG spec and support $XDG_STATE_HOME.
|
||||
|
||||
That means, by default "~/.local/state/<AppName>".
|
||||
"""
|
||||
if system in ["win32", "darwin"]:
|
||||
path = user_data_dir(appname, appauthor, None, roaming)
|
||||
else:
|
||||
path = os.getenv('XDG_STATE_HOME', os.path.expanduser("~/.local/state"))
|
||||
if appname:
|
||||
path = os.path.join(path, appname)
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
return path
|
||||
|
||||
|
||||
def user_log_dir(appname=None, appauthor=None, version=None, opinion=True):
|
||||
r"""Return full path to the user-specific log dir for this application.
|
||||
|
||||
"appname" is the name of application.
|
||||
If None, just the system directory is returned.
|
||||
"appauthor" (only used on Windows) is the name of the
|
||||
appauthor or distributing body for this application. Typically
|
||||
it is the owning company name. This falls back to appname. You may
|
||||
pass False to disable it.
|
||||
"version" is an optional version path element to append to the
|
||||
path. You might want to use this if you want multiple versions
|
||||
of your app to be able to run independently. If used, this
|
||||
would typically be "<major>.<minor>".
|
||||
Only applied when appname is present.
|
||||
"opinion" (boolean) can be False to disable the appending of
|
||||
"Logs" to the base app data dir for Windows, and "log" to the
|
||||
base cache dir for Unix. See discussion below.
|
||||
|
||||
Typical user log directories are:
|
||||
Mac OS X: ~/Library/Logs/<AppName>
|
||||
Unix: ~/.cache/<AppName>/log # or under $XDG_CACHE_HOME if defined
|
||||
Win XP: C:\Documents and Settings\<username>\Local Settings\Application Data\<AppAuthor>\<AppName>\Logs
|
||||
Vista: C:\Users\<username>\AppData\Local\<AppAuthor>\<AppName>\Logs
|
||||
|
||||
On Windows the only suggestion in the MSDN docs is that local settings
|
||||
go in the `CSIDL_LOCAL_APPDATA` directory. (Note: I'm interested in
|
||||
examples of what some windows apps use for a logs dir.)
|
||||
|
||||
OPINION: This function appends "Logs" to the `CSIDL_LOCAL_APPDATA`
|
||||
value for Windows and appends "log" to the user cache dir for Unix.
|
||||
This can be disabled with the `opinion=False` option.
|
||||
"""
|
||||
if system == "darwin":
|
||||
path = os.path.join(
|
||||
os.path.expanduser('~/Library/Logs'),
|
||||
appname)
|
||||
elif system == "win32":
|
||||
path = user_data_dir(appname, appauthor, version)
|
||||
version = False
|
||||
if opinion:
|
||||
path = os.path.join(path, "Logs")
|
||||
else:
|
||||
path = user_cache_dir(appname, appauthor, version)
|
||||
version = False
|
||||
if opinion:
|
||||
path = os.path.join(path, "log")
|
||||
if appname and version:
|
||||
path = os.path.join(path, version)
|
||||
return path
|
||||
|
||||
|
||||
class AppDirs(object):
|
||||
"""Convenience wrapper for getting application dirs."""
|
||||
def __init__(self, appname=None, appauthor=None, version=None,
|
||||
roaming=False, multipath=False):
|
||||
self.appname = appname
|
||||
self.appauthor = appauthor
|
||||
self.version = version
|
||||
self.roaming = roaming
|
||||
self.multipath = multipath
|
||||
|
||||
@property
|
||||
def user_data_dir(self):
|
||||
return user_data_dir(self.appname, self.appauthor,
|
||||
version=self.version, roaming=self.roaming)
|
||||
|
||||
@property
|
||||
def site_data_dir(self):
|
||||
return site_data_dir(self.appname, self.appauthor,
|
||||
version=self.version, multipath=self.multipath)
|
||||
|
||||
@property
|
||||
def user_config_dir(self):
|
||||
return user_config_dir(self.appname, self.appauthor,
|
||||
version=self.version, roaming=self.roaming)
|
||||
|
||||
@property
|
||||
def site_config_dir(self):
|
||||
return site_config_dir(self.appname, self.appauthor,
|
||||
version=self.version, multipath=self.multipath)
|
||||
|
||||
@property
|
||||
def user_cache_dir(self):
|
||||
return user_cache_dir(self.appname, self.appauthor,
|
||||
version=self.version)
|
||||
|
||||
@property
|
||||
def user_state_dir(self):
|
||||
return user_state_dir(self.appname, self.appauthor,
|
||||
version=self.version)
|
||||
|
||||
@property
|
||||
def user_log_dir(self):
|
||||
return user_log_dir(self.appname, self.appauthor,
|
||||
version=self.version)
|
||||
|
||||
|
||||
#---- internal support stuff
|
||||
|
||||
def _get_win_folder_from_registry(csidl_name):
|
||||
"""This is a fallback technique at best. I'm not sure if using the
|
||||
registry for this guarantees us the correct answer for all CSIDL_*
|
||||
names.
|
||||
"""
|
||||
if PY3:
|
||||
import winreg as _winreg
|
||||
else:
|
||||
import _winreg
|
||||
|
||||
shell_folder_name = {
|
||||
"CSIDL_APPDATA": "AppData",
|
||||
"CSIDL_COMMON_APPDATA": "Common AppData",
|
||||
"CSIDL_LOCAL_APPDATA": "Local AppData",
|
||||
}[csidl_name]
|
||||
|
||||
key = _winreg.OpenKey(
|
||||
_winreg.HKEY_CURRENT_USER,
|
||||
r"Software\Microsoft\Windows\CurrentVersion\Explorer\Shell Folders"
|
||||
)
|
||||
dir, type = _winreg.QueryValueEx(key, shell_folder_name)
|
||||
return dir
|
||||
|
||||
|
||||
def _get_win_folder_with_ctypes(csidl_name):
|
||||
import ctypes
|
||||
|
||||
csidl_const = {
|
||||
"CSIDL_APPDATA": 26,
|
||||
"CSIDL_COMMON_APPDATA": 35,
|
||||
"CSIDL_LOCAL_APPDATA": 28,
|
||||
}[csidl_name]
|
||||
|
||||
buf = ctypes.create_unicode_buffer(1024)
|
||||
ctypes.windll.shell32.SHGetFolderPathW(None, csidl_const, None, 0, buf)
|
||||
|
||||
# Downgrade to short path name if have highbit chars. See
|
||||
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
|
||||
has_high_char = False
|
||||
for c in buf:
|
||||
if ord(c) > 255:
|
||||
has_high_char = True
|
||||
break
|
||||
if has_high_char:
|
||||
buf2 = ctypes.create_unicode_buffer(1024)
|
||||
if ctypes.windll.kernel32.GetShortPathNameW(buf.value, buf2, 1024):
|
||||
buf = buf2
|
||||
|
||||
return buf.value
|
||||
|
||||
def _get_win_folder_with_jna(csidl_name):
|
||||
import array
|
||||
from com.sun import jna
|
||||
from com.sun.jna.platform import win32
|
||||
|
||||
buf_size = win32.WinDef.MAX_PATH * 2
|
||||
buf = array.zeros('c', buf_size)
|
||||
shell = win32.Shell32.INSTANCE
|
||||
shell.SHGetFolderPath(None, getattr(win32.ShlObj, csidl_name), None, win32.ShlObj.SHGFP_TYPE_CURRENT, buf)
|
||||
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
|
||||
|
||||
# Downgrade to short path name if have highbit chars. See
|
||||
# <http://bugs.activestate.com/show_bug.cgi?id=85099>.
|
||||
has_high_char = False
|
||||
for c in dir:
|
||||
if ord(c) > 255:
|
||||
has_high_char = True
|
||||
break
|
||||
if has_high_char:
|
||||
buf = array.zeros('c', buf_size)
|
||||
kernel = win32.Kernel32.INSTANCE
|
||||
if kernel.GetShortPathName(dir, buf, buf_size):
|
||||
dir = jna.Native.toString(buf.tostring()).rstrip("\0")
|
||||
|
||||
return dir
|
||||
|
||||
if system == "win32":
|
||||
try:
|
||||
from ctypes import windll
|
||||
except ImportError:
|
||||
try:
|
||||
import com.sun.jna
|
||||
except ImportError:
|
||||
_get_win_folder = _get_win_folder_from_registry
|
||||
else:
|
||||
_get_win_folder = _get_win_folder_with_jna
|
||||
else:
|
||||
_get_win_folder = _get_win_folder_with_ctypes
|
||||
|
||||
|
||||
#---- self test code
|
||||
|
||||
if __name__ == "__main__":
|
||||
appname = "MyApp"
|
||||
appauthor = "MyCompany"
|
||||
|
||||
props = ("user_data_dir",
|
||||
"user_config_dir",
|
||||
"user_cache_dir",
|
||||
"user_state_dir",
|
||||
"user_log_dir",
|
||||
"site_data_dir",
|
||||
"site_config_dir")
|
||||
|
||||
print("-- app dirs %s --" % __version__)
|
||||
|
||||
print("-- app dirs (with optional 'version')")
|
||||
dirs = AppDirs(appname, appauthor, version="1.0")
|
||||
for prop in props:
|
||||
print("%s: %s" % (prop, getattr(dirs, prop)))
|
||||
|
||||
print("\n-- app dirs (without optional 'version')")
|
||||
dirs = AppDirs(appname, appauthor)
|
||||
for prop in props:
|
||||
print("%s: %s" % (prop, getattr(dirs, prop)))
|
||||
|
||||
print("\n-- app dirs (without optional 'appauthor')")
|
||||
dirs = AppDirs(appname)
|
||||
for prop in props:
|
||||
print("%s: %s" % (prop, getattr(dirs, prop)))
|
||||
|
||||
print("\n-- app dirs (with disabled 'appauthor')")
|
||||
dirs = AppDirs(appname, appauthor=False)
|
||||
for prop in props:
|
||||
print("%s: %s" % (prop, getattr(dirs, prop)))
|
||||
397
jc/cli.py
397
jc/cli.py
@@ -1,20 +1,28 @@
|
||||
#!/usr/bin/env python3
|
||||
"""jc - JSON CLI output utility
|
||||
JC cli module
|
||||
"""
|
||||
|
||||
import sys
|
||||
import os
|
||||
import os.path
|
||||
import re
|
||||
import shlex
|
||||
import importlib
|
||||
import textwrap
|
||||
import signal
|
||||
import json
|
||||
import jc.utils
|
||||
import pygments
|
||||
from pygments import highlight
|
||||
from pygments.style import Style
|
||||
from pygments.token import (Name, Number, String, Keyword)
|
||||
from pygments.lexers import JsonLexer
|
||||
from pygments.formatters import Terminal256Formatter
|
||||
import jc.appdirs as appdirs
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.7.5'
|
||||
description = 'jc cli output JSON conversion tool'
|
||||
version = '1.13.0'
|
||||
description = 'JSON CLI output utility'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
@@ -22,15 +30,23 @@ class info():
|
||||
__version__ = info.version
|
||||
|
||||
parsers = [
|
||||
'airport',
|
||||
'airport-s',
|
||||
'arp',
|
||||
'blkid',
|
||||
'crontab',
|
||||
'crontab-u',
|
||||
'csv',
|
||||
'df',
|
||||
'dig',
|
||||
'dmidecode',
|
||||
'du',
|
||||
'env',
|
||||
'file',
|
||||
'free',
|
||||
'fstab',
|
||||
'group',
|
||||
'gshadow',
|
||||
'history',
|
||||
'hosts',
|
||||
'id',
|
||||
@@ -38,58 +54,180 @@ parsers = [
|
||||
'ini',
|
||||
'iptables',
|
||||
'jobs',
|
||||
'last',
|
||||
'ls',
|
||||
'lsblk',
|
||||
'lsmod',
|
||||
'lsof',
|
||||
'mount',
|
||||
'netstat',
|
||||
'ntpq',
|
||||
'passwd',
|
||||
'ping',
|
||||
'pip-list',
|
||||
'pip-show',
|
||||
'ps',
|
||||
'route',
|
||||
'shadow',
|
||||
'ss',
|
||||
'stat',
|
||||
'sysctl',
|
||||
'systemctl',
|
||||
'systemctl-lj',
|
||||
'systemctl-ls',
|
||||
'systemctl-luf',
|
||||
'timedatectl',
|
||||
'tracepath',
|
||||
'traceroute',
|
||||
'uname',
|
||||
'uptime',
|
||||
'w',
|
||||
'who',
|
||||
'xml',
|
||||
'yaml'
|
||||
]
|
||||
|
||||
# List of custom or override parsers.
|
||||
# Allow any <user_data_dir>/jc/jcparsers/*.py
|
||||
local_parsers = []
|
||||
data_dir = appdirs.user_data_dir('jc', 'jc')
|
||||
local_parsers_dir = os.path.join(data_dir, 'jcparsers')
|
||||
if os.path.isdir(local_parsers_dir):
|
||||
sys.path.append(data_dir)
|
||||
for name in os.listdir(local_parsers_dir):
|
||||
if re.match(r'\w+\.py', name) and os.path.isfile(os.path.join(local_parsers_dir, name)):
|
||||
plugin_name = name[0:-3]
|
||||
local_parsers.append(plugin_name)
|
||||
if plugin_name not in parsers:
|
||||
parsers.append(plugin_name)
|
||||
|
||||
|
||||
# We only support 2.3.0+, pygments changed color names in 2.4.0.
|
||||
# startswith is sufficient and avoids potential exceptions from split and int.
|
||||
if pygments.__version__.startswith('2.3.'):
|
||||
PYGMENT_COLOR = {
|
||||
'black': '#ansiblack',
|
||||
'red': '#ansidarkred',
|
||||
'green': '#ansidarkgreen',
|
||||
'yellow': '#ansibrown',
|
||||
'blue': '#ansidarkblue',
|
||||
'magenta': '#ansipurple',
|
||||
'cyan': '#ansiteal',
|
||||
'gray': '#ansilightgray',
|
||||
'brightblack': '#ansidarkgray',
|
||||
'brightred': '#ansired',
|
||||
'brightgreen': '#ansigreen',
|
||||
'brightyellow': '#ansiyellow',
|
||||
'brightblue': '#ansiblue',
|
||||
'brightmagenta': '#ansifuchsia',
|
||||
'brightcyan': '#ansiturquoise',
|
||||
'white': '#ansiwhite',
|
||||
}
|
||||
else:
|
||||
PYGMENT_COLOR = {
|
||||
'black': 'ansiblack',
|
||||
'red': 'ansired',
|
||||
'green': 'ansigreen',
|
||||
'yellow': 'ansiyellow',
|
||||
'blue': 'ansiblue',
|
||||
'magenta': 'ansimagenta',
|
||||
'cyan': 'ansicyan',
|
||||
'gray': 'ansigray',
|
||||
'brightblack': 'ansibrightblack',
|
||||
'brightred': 'ansibrightred',
|
||||
'brightgreen': 'ansibrightgreen',
|
||||
'brightyellow': 'ansibrightyellow',
|
||||
'brightblue': 'ansibrightblue',
|
||||
'brightmagenta': 'ansibrightmagenta',
|
||||
'brightcyan': 'ansibrightcyan',
|
||||
'white': 'ansiwhite',
|
||||
}
|
||||
|
||||
|
||||
def set_env_colors(env_colors=None):
|
||||
"""
|
||||
Return a dictionary to be used in Pygments custom style class.
|
||||
|
||||
Grab custom colors from JC_COLORS environment variable. JC_COLORS env variable takes 4 comma
|
||||
separated string values and should be in the format of:
|
||||
|
||||
JC_COLORS=<keyname_color>,<keyword_color>,<number_color>,<string_color>
|
||||
|
||||
Where colors are: black, red, green, yellow, blue, magenta, cyan, gray, brightblack, brightred,
|
||||
brightgreen, brightyellow, brightblue, brightmagenta, brightcyan, white, default
|
||||
|
||||
Default colors:
|
||||
|
||||
JC_COLORS=blue,brightblack,magenta,green
|
||||
or
|
||||
JC_COLORS=default,default,default,default
|
||||
|
||||
"""
|
||||
input_error = False
|
||||
|
||||
if env_colors:
|
||||
color_list = env_colors.split(',')
|
||||
else:
|
||||
color_list = ['default', 'default', 'default', 'default']
|
||||
|
||||
if len(color_list) != 4:
|
||||
input_error = True
|
||||
|
||||
for color in color_list:
|
||||
if color != 'default' and color not in PYGMENT_COLOR:
|
||||
input_error = True
|
||||
|
||||
# if there is an issue with the env variable, just set all colors to default and move on
|
||||
if input_error:
|
||||
print('jc: Warning: could not parse JC_COLORS environment variable\n', file=sys.stderr)
|
||||
color_list = ['default', 'default', 'default', 'default']
|
||||
|
||||
# Try the color set in the JC_COLORS env variable first. If it is set to default, then fall back to default colors
|
||||
return {
|
||||
Name.Tag: f'bold {PYGMENT_COLOR[color_list[0]]}' if not color_list[0] == 'default' else f"bold {PYGMENT_COLOR['blue']}", # key names
|
||||
Keyword: PYGMENT_COLOR[color_list[1]] if not color_list[1] == 'default' else PYGMENT_COLOR['brightblack'], # true, false, null
|
||||
Number: PYGMENT_COLOR[color_list[2]] if not color_list[2] == 'default' else PYGMENT_COLOR['magenta'], # numbers
|
||||
String: PYGMENT_COLOR[color_list[3]] if not color_list[3] == 'default' else PYGMENT_COLOR['green'] # strings
|
||||
}
|
||||
|
||||
|
||||
def piped_output():
|
||||
"""Return False if stdout is a TTY. True if output is being piped to another program"""
|
||||
if sys.stdout.isatty():
|
||||
return False
|
||||
else:
|
||||
return True
|
||||
|
||||
|
||||
def ctrlc(signum, frame):
|
||||
"""exit with error on SIGINT"""
|
||||
"""Exit with error on SIGINT"""
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def parser_shortname(parser_argument):
|
||||
"""short name of the parser with dashes and no -- prefix"""
|
||||
"""Return short name of the parser with dashes and no -- prefix"""
|
||||
return parser_argument[2:]
|
||||
|
||||
|
||||
def parser_argument(parser):
|
||||
"""short name of the parser with dashes and with -- prefix"""
|
||||
"""Return short name of the parser with dashes and with -- prefix"""
|
||||
return f'--{parser}'
|
||||
|
||||
|
||||
def parser_mod_shortname(parser):
|
||||
"""short name of the parser's module name (no -- prefix and dashes converted to underscores)"""
|
||||
"""Return short name of the parser's module name (no -- prefix and dashes converted to underscores)"""
|
||||
return parser.replace('--', '').replace('-', '_')
|
||||
|
||||
|
||||
def parser_module(parser):
|
||||
"""import the module just in time and return the module object"""
|
||||
importlib.import_module('jc.parsers.' + parser_mod_shortname(parser))
|
||||
return getattr(jc.parsers, parser_mod_shortname(parser))
|
||||
"""Import the module just in time and return the module object"""
|
||||
shortname = parser_mod_shortname(parser)
|
||||
path = ('jcparsers.' if shortname in local_parsers else 'jc.parsers.')
|
||||
return importlib.import_module(path + shortname)
|
||||
|
||||
|
||||
def parsers_text(indent=0, pad=0):
|
||||
"""return the argument and description information from each parser"""
|
||||
"""Return the argument and description information from each parser"""
|
||||
ptext = ''
|
||||
for parser in parsers:
|
||||
parser_arg = parser_argument(parser)
|
||||
@@ -107,7 +245,7 @@ def parsers_text(indent=0, pad=0):
|
||||
|
||||
|
||||
def about_jc():
|
||||
"""return jc info and the contents of each parser.info as a dictionary"""
|
||||
"""Return jc info and the contents of each parser.info as a dictionary"""
|
||||
parser_list = []
|
||||
|
||||
for parser in parsers:
|
||||
@@ -137,7 +275,7 @@ def about_jc():
|
||||
|
||||
|
||||
def helptext(message):
|
||||
"""return the help text with the list of parsers"""
|
||||
"""Return the help text with the list of parsers"""
|
||||
parsers_string = parsers_text(indent=12, pad=17)
|
||||
|
||||
helptext_string = f'''
|
||||
@@ -145,10 +283,6 @@ def helptext(message):
|
||||
|
||||
Usage: COMMAND | jc PARSER [OPTIONS]
|
||||
|
||||
or
|
||||
|
||||
COMMAND | jc [OPTIONS] PARSER
|
||||
|
||||
or magic syntax:
|
||||
|
||||
jc [OPTIONS] COMMAND
|
||||
@@ -157,7 +291,8 @@ def helptext(message):
|
||||
{parsers_string}
|
||||
Options:
|
||||
-a about jc
|
||||
-d debug - show trace messages
|
||||
-d debug - show traceback (-dd for verbose traceback)
|
||||
-m monochrome output
|
||||
-p pretty print output
|
||||
-q quiet - suppress warnings
|
||||
-r raw JSON output
|
||||
@@ -169,153 +304,167 @@ def helptext(message):
|
||||
|
||||
jc -p ls -al
|
||||
'''
|
||||
print(textwrap.dedent(helptext_string), file=sys.stderr)
|
||||
return textwrap.dedent(helptext_string)
|
||||
|
||||
|
||||
def json_out(data, pretty=False):
|
||||
if pretty:
|
||||
print(json.dumps(data, indent=2))
|
||||
def json_out(data, pretty=False, env_colors=None, mono=False, piped_out=False):
|
||||
"""Return a JSON formatted string. String may include color codes or be pretty printed."""
|
||||
if not mono and not piped_out:
|
||||
# set colors
|
||||
class JcStyle(Style):
|
||||
styles = set_env_colors(env_colors)
|
||||
|
||||
if pretty:
|
||||
return str(highlight(json.dumps(data, indent=2), JsonLexer(), Terminal256Formatter(style=JcStyle))[0:-1])
|
||||
else:
|
||||
return str(highlight(json.dumps(data), JsonLexer(), Terminal256Formatter(style=JcStyle))[0:-1])
|
||||
else:
|
||||
print(json.dumps(data))
|
||||
if pretty:
|
||||
return json.dumps(data, indent=2)
|
||||
else:
|
||||
return json.dumps(data)
|
||||
|
||||
|
||||
def generate_magic_command(args):
|
||||
"""
|
||||
Return a tuple with a boolean and a command, where the boolean signifies that
|
||||
the command is valid, and the command is either a command string or None.
|
||||
"""
|
||||
|
||||
# Parse with magic syntax: jc -p ls -al
|
||||
if len(args) <= 1 or args[1].startswith('--'):
|
||||
return False, None
|
||||
|
||||
# correctly parse escape characters and spaces with shlex
|
||||
args_given = ' '.join(map(shlex.quote, args[1:])).split()
|
||||
options = []
|
||||
|
||||
# find the options
|
||||
for arg in list(args_given):
|
||||
# parser found - use standard syntax
|
||||
if arg.startswith('--'):
|
||||
return False, None
|
||||
|
||||
# option found - populate option list
|
||||
elif arg.startswith('-'):
|
||||
options.extend(args_given.pop(0)[1:])
|
||||
|
||||
# command found if iterator didn't already stop - stop iterating
|
||||
else:
|
||||
break
|
||||
|
||||
# all options popped and no command found - for case like 'jc -a'
|
||||
if len(args_given) == 0:
|
||||
return False, None
|
||||
|
||||
magic_dict = {}
|
||||
parser_info = about_jc()['parsers']
|
||||
|
||||
# create a dictionary of magic_commands to their respective parsers.
|
||||
for entry in parser_info:
|
||||
# Update the dict with all of the magic commands for this parser, if they exist.
|
||||
magic_dict.update({mc: entry['argument'] for mc in entry.get('magic_commands', [])})
|
||||
|
||||
# find the command and parser
|
||||
one_word_command = args_given[0]
|
||||
two_word_command = ' '.join(args_given[0:2])
|
||||
|
||||
# try to get a parser for two_word_command, otherwise get one for one_word_command
|
||||
found_parser = magic_dict.get(two_word_command, magic_dict.get(one_word_command))
|
||||
|
||||
# construct a new command line using the standard syntax: COMMAND | jc --PARSER -OPTIONS
|
||||
run_command = ' '.join(args_given)
|
||||
if found_parser:
|
||||
cmd_options = ('-' + ''.join(options)) if options else ''
|
||||
return True, ' '.join([run_command, '|', 'jc', found_parser, cmd_options])
|
||||
else:
|
||||
return False, run_command
|
||||
|
||||
|
||||
def magic():
|
||||
"""Parse with magic syntax: jc -p ls -al"""
|
||||
if len(sys.argv) > 1 and not sys.argv[1].startswith('--'):
|
||||
parser_info = about_jc()['parsers']
|
||||
# correctly parse escape characters and spaces with shlex
|
||||
args_given = " ".join(map(shlex.quote, sys.argv[1:])).split()
|
||||
options = []
|
||||
found_parser = None
|
||||
|
||||
# find the options
|
||||
if args_given[0].startswith('-'):
|
||||
p = 0
|
||||
for i, arg in list(enumerate(args_given)):
|
||||
# parser found - use standard syntax
|
||||
if arg.startswith('--'):
|
||||
return
|
||||
# option found - populate option list
|
||||
elif arg.startswith('-'):
|
||||
options.append(args_given.pop(i - p)[1:])
|
||||
p = p + 1
|
||||
# command found if iterator didn't already stop - stop iterating
|
||||
else:
|
||||
break
|
||||
|
||||
# find the command and parser
|
||||
for parser in parser_info:
|
||||
if 'magic_commands' in parser:
|
||||
# first pass for two word commands: e.g. 'pip list'
|
||||
for magic_command in parser['magic_commands']:
|
||||
try:
|
||||
if ' '.join(args_given[0:2]) == magic_command:
|
||||
found_parser = parser['argument']
|
||||
break
|
||||
# No command found - go to next loop (for cases like 'jc -a')
|
||||
except Exception:
|
||||
break
|
||||
|
||||
# second pass for one word commands: e.g. 'ls'
|
||||
if not found_parser:
|
||||
for magic_command in parser['magic_commands']:
|
||||
try:
|
||||
if args_given[0] == magic_command:
|
||||
found_parser = parser['argument']
|
||||
break
|
||||
# No command found - use standard syntax (for cases like 'jc -a')
|
||||
except Exception:
|
||||
return
|
||||
|
||||
# construct a new command line using the standard syntax: COMMAND | jc --PARSER -OPTIONS
|
||||
run_command = ' '.join(args_given)
|
||||
if found_parser:
|
||||
if options:
|
||||
cmd_options = '-' + ''.join(options)
|
||||
else:
|
||||
cmd_options = ''
|
||||
whole_command = ' '.join([run_command, '|', 'jc', found_parser, cmd_options])
|
||||
|
||||
os.system(whole_command)
|
||||
exit()
|
||||
else:
|
||||
helptext(f'parser not found for "{run_command}"')
|
||||
sys.exit(1)
|
||||
"""Runs the command generated by generate_magic_command() to support magic syntax"""
|
||||
valid_command, run_command = generate_magic_command(sys.argv)
|
||||
if valid_command:
|
||||
os.system(run_command)
|
||||
sys.exit(0)
|
||||
elif run_command is None:
|
||||
return
|
||||
else:
|
||||
print(helptext(f'parser not found for "{run_command}"'), file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
|
||||
def main():
|
||||
# break on ctrl-c keyboard interrupt
|
||||
signal.signal(signal.SIGINT, ctrlc)
|
||||
|
||||
# break on pipe error. need try/except for windows compatibility
|
||||
try:
|
||||
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
|
||||
except AttributeError:
|
||||
pass
|
||||
|
||||
jc_colors = os.getenv('JC_COLORS')
|
||||
|
||||
# try magic syntax first: e.g. jc -p ls -al
|
||||
magic()
|
||||
|
||||
options = []
|
||||
debug = False
|
||||
pretty = False
|
||||
quiet = False
|
||||
raw = False
|
||||
|
||||
# options
|
||||
for opt in sys.argv:
|
||||
if opt.startswith('-') and not opt.startswith('--'):
|
||||
for flag in opt[1:]:
|
||||
options.append(flag)
|
||||
options.extend(opt[1:])
|
||||
|
||||
if 'd' in options:
|
||||
debug = True
|
||||
debug = 'd' in options
|
||||
verbose_debug = True if options.count('d') > 1 else False
|
||||
mono = 'm' in options
|
||||
pretty = 'p' in options
|
||||
quiet = 'q' in options
|
||||
raw = 'r' in options
|
||||
|
||||
if 'p' in options:
|
||||
pretty = True
|
||||
|
||||
if 'q' in options:
|
||||
quiet = True
|
||||
|
||||
if 'r' in options:
|
||||
raw = True
|
||||
if verbose_debug:
|
||||
import jc.tracebackplus
|
||||
jc.tracebackplus.enable(context=11)
|
||||
|
||||
if 'a' in options:
|
||||
json_out(about_jc(), pretty=pretty)
|
||||
exit()
|
||||
print(json_out(about_jc(), pretty=pretty, env_colors=jc_colors, mono=mono, piped_out=piped_output()))
|
||||
sys.exit(0)
|
||||
|
||||
if sys.stdin.isatty():
|
||||
helptext('missing piped data')
|
||||
print(helptext('missing piped data'), file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
data = sys.stdin.read()
|
||||
|
||||
found = False
|
||||
|
||||
if debug:
|
||||
for arg in sys.argv:
|
||||
parser_name = parser_shortname(arg)
|
||||
for arg in sys.argv:
|
||||
parser_name = parser_shortname(arg)
|
||||
|
||||
if parser_name in parsers:
|
||||
# load parser module just in time so we don't need to load all modules
|
||||
parser = parser_module(arg)
|
||||
if parser_name in parsers:
|
||||
# load parser module just in time so we don't need to load all modules
|
||||
parser = parser_module(arg)
|
||||
try:
|
||||
result = parser.parse(data, raw=raw, quiet=quiet)
|
||||
found = True
|
||||
break
|
||||
else:
|
||||
for arg in sys.argv:
|
||||
parser_name = parser_shortname(arg)
|
||||
|
||||
if parser_name in parsers:
|
||||
# load parser module just in time so we don't need to load all modules
|
||||
parser = parser_module(arg)
|
||||
try:
|
||||
result = parser.parse(data, raw=raw, quiet=quiet)
|
||||
found = True
|
||||
break
|
||||
except Exception:
|
||||
jc.utils.error_message(f'{parser_name} parser could not parse the input data. Did you use the correct parser?\n For details use the -d option.')
|
||||
except Exception:
|
||||
if debug:
|
||||
raise
|
||||
else:
|
||||
import jc.utils
|
||||
jc.utils.error_message(
|
||||
f'{parser_name} parser could not parse the input data. Did you use the correct parser?\n'
|
||||
' For details use the -d or -dd option.')
|
||||
sys.exit(1)
|
||||
|
||||
if not found:
|
||||
helptext('missing or incorrect arguments')
|
||||
print(helptext('missing or incorrect arguments'), file=sys.stderr)
|
||||
sys.exit(1)
|
||||
|
||||
json_out(result, pretty=pretty)
|
||||
print(json_out(result, pretty=pretty, env_colors=jc_colors, mono=mono, piped_out=piped_output()))
|
||||
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
||||
143
jc/parsers/airport.py
Normal file
143
jc/parsers/airport.py
Normal file
@@ -0,0 +1,143 @@
|
||||
"""jc - JSON CLI output utility airport -I Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --airport as the first argument if the piped input is coming from airport -I (OSX)
|
||||
|
||||
This program can be found at:
|
||||
/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport
|
||||
|
||||
Compatibility:
|
||||
|
||||
'darwin'
|
||||
|
||||
Examples:
|
||||
|
||||
$ airport -I | jc --airport -p
|
||||
{
|
||||
"agrctlrssi": -66,
|
||||
"agrextrssi": 0,
|
||||
"agrctlnoise": -90,
|
||||
"agrextnoise": 0,
|
||||
"state": "running",
|
||||
"op_mode": "station",
|
||||
"lasttxrate": 195,
|
||||
"maxrate": 867,
|
||||
"lastassocstatus": 0,
|
||||
"802_11_auth": "open",
|
||||
"link_auth": "wpa2-psk",
|
||||
"bssid": "3c:37:86:15:ad:f9",
|
||||
"ssid": "SnazzleDazzle",
|
||||
"mcs": 0,
|
||||
"channel": "48,80"
|
||||
}
|
||||
|
||||
$ airport -I | jc --airport -p -r
|
||||
{
|
||||
"agrctlrssi": "-66",
|
||||
"agrextrssi": "0",
|
||||
"agrctlnoise": "-90",
|
||||
"agrextnoise": "0",
|
||||
"state": "running",
|
||||
"op_mode": "station",
|
||||
"lasttxrate": "195",
|
||||
"maxrate": "867",
|
||||
"lastassocstatus": "0",
|
||||
"802_11_auth": "open",
|
||||
"link_auth": "wpa2-psk",
|
||||
"bssid": "3c:37:86:15:ad:f9",
|
||||
"ssid": "SnazzleDazzle",
|
||||
"mcs": "0",
|
||||
"channel": "48,80"
|
||||
}
|
||||
"""
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
description = 'airport -I command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
# details = 'enter any other details here'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['darwin']
|
||||
magic_commands = ['airport -I']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured data with the following schema:
|
||||
|
||||
{
|
||||
"agrctlrssi": integer,
|
||||
"agrextrssi": integer,
|
||||
"agrctlnoise": integer,
|
||||
"agrextnoise": integer,
|
||||
"state": string,
|
||||
"op_mode": string,
|
||||
"lasttxrate": integer,
|
||||
"maxrate": integer,
|
||||
"lastassocstatus": integer,
|
||||
"802_11_auth": string,
|
||||
"link_auth": string,
|
||||
"bssid": string,
|
||||
"ssid": string,
|
||||
"mcs": integer,
|
||||
"channel": string
|
||||
}
|
||||
"""
|
||||
# integer changes
|
||||
int_list = ['agrctlrssi', 'agrextrssi', 'agrctlnoise', 'agrextnoise',
|
||||
'lasttxrate', 'maxrate', 'lastassocstatus', 'mcs']
|
||||
for key in proc_data:
|
||||
if key in int_list:
|
||||
try:
|
||||
proc_data[key] = int(proc_data[key])
|
||||
except (ValueError):
|
||||
proc_data[key] = None
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = {}
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for line in filter(None, data.splitlines()):
|
||||
linedata = line.split(':', maxsplit=1)
|
||||
raw_output[linedata[0].strip().lower().replace(' ', '_').replace('.', '_')] = linedata[1].strip()
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
188
jc/parsers/airport_s.py
Normal file
188
jc/parsers/airport_s.py
Normal file
@@ -0,0 +1,188 @@
|
||||
"""jc - JSON CLI output utility airport -s Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --airport as the first argument if the piped input is coming from airport -s (OSX)
|
||||
|
||||
This program can be found at:
|
||||
/System/Library/PrivateFrameworks/Apple80211.framework/Versions/Current/Resources/airport
|
||||
|
||||
Compatibility:
|
||||
|
||||
'darwin'
|
||||
|
||||
Examples:
|
||||
|
||||
$ airport -s | jc --airport-s -p
|
||||
[
|
||||
{
|
||||
"ssid": "DIRECT-4A-HP OfficeJet 3830",
|
||||
"bssid": "00:67:eb:2a:a7:3b",
|
||||
"rssi": -90,
|
||||
"channel": "6",
|
||||
"ht": true,
|
||||
"cc": "--",
|
||||
"security": [
|
||||
"WPA2(PSK/AES/AES)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ssid": "Latitude38",
|
||||
"bssid": "c0:ff:d5:d2:7a:f3",
|
||||
"rssi": -85,
|
||||
"channel": "11",
|
||||
"ht": true,
|
||||
"cc": "US",
|
||||
"security": [
|
||||
"WPA2(PSK/AES/AES)"
|
||||
]
|
||||
},
|
||||
{
|
||||
"ssid": "xfinitywifi",
|
||||
"bssid": "6e:e3:0e:b8:45:99",
|
||||
"rssi": -83,
|
||||
"channel": "11",
|
||||
"ht": true,
|
||||
"cc": "US",
|
||||
"security": [
|
||||
"NONE"
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ airport -s | jc --airport -p -r
|
||||
[
|
||||
{
|
||||
"ssid": "DIRECT-F3-HP ENVY 5660 series",
|
||||
"bssid": "b0:5a:da:6f:0a:d4",
|
||||
"rssi": "-93",
|
||||
"channel": "1",
|
||||
"ht": "Y",
|
||||
"cc": "--",
|
||||
"security": "WPA2(PSK/AES/AES)"
|
||||
},
|
||||
{
|
||||
"ssid": "YouAreInfected-5",
|
||||
"bssid": "5c:e3:0e:c2:85:da",
|
||||
"rssi": "-85",
|
||||
"channel": "36",
|
||||
"ht": "Y",
|
||||
"cc": "US",
|
||||
"security": "WPA(PSK/AES,TKIP/TKIP) WPA2(PSK/AES,TKIP/TKIP)"
|
||||
},
|
||||
{
|
||||
"ssid": "YuanFamily",
|
||||
"bssid": "5c:e3:0e:b8:5f:9a",
|
||||
"rssi": "-84",
|
||||
"channel": "11",
|
||||
"ht": "Y",
|
||||
"cc": "US",
|
||||
"security": "WPA(PSK/AES,TKIP/TKIP) WPA2(PSK/AES,TKIP/TKIP)"
|
||||
},
|
||||
...
|
||||
]
|
||||
"""
|
||||
import jc.utils
|
||||
import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.2'
|
||||
description = 'airport -s command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
# details = 'enter any other details here'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['darwin']
|
||||
magic_commands = ['airport -s']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
[
|
||||
{
|
||||
"ssid": string,
|
||||
"bssid": string,
|
||||
"rssi": integer,
|
||||
"channel": string,
|
||||
"ht": boolean,
|
||||
"cc": string,
|
||||
"security": [
|
||||
string,
|
||||
]
|
||||
}
|
||||
]
|
||||
"""
|
||||
for entry in proc_data:
|
||||
|
||||
# integers
|
||||
int_list = ['rssi']
|
||||
for key in int_list:
|
||||
if key in entry:
|
||||
try:
|
||||
entry[key] = int(entry[key])
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
# booleans
|
||||
bool_list = ['ht']
|
||||
for key in entry:
|
||||
if key in bool_list:
|
||||
try:
|
||||
entry[key] = True if entry[key] == 'Y' else False
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
if 'security' in entry:
|
||||
entry['security'] = entry['security'].split()
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
# fix headers
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
cleandata[0] = cleandata[0].replace('-', '_')
|
||||
cleandata[0] = cleandata[0].replace('security (auth/unicast/group)', 'security')
|
||||
|
||||
# parse the data
|
||||
raw_output = jc.parsers.universal.sparse_table_parse(cleandata)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
@@ -58,6 +58,8 @@ Examples:
|
||||
"hwtype": "ether",
|
||||
"hwaddress": "00:50:56:f0:98:26",
|
||||
"iface": "ens33"
|
||||
"permanent": false,
|
||||
"expires": 1182
|
||||
},
|
||||
{
|
||||
"name": "gateway",
|
||||
@@ -65,6 +67,8 @@ Examples:
|
||||
"hwtype": "ether",
|
||||
"hwaddress": "00:50:56:f7:4a:fc",
|
||||
"iface": "ens33"
|
||||
"permanent": false,
|
||||
"expires": 110
|
||||
}
|
||||
]
|
||||
|
||||
@@ -76,6 +80,8 @@ Examples:
|
||||
"hwtype": "ether",
|
||||
"hwaddress": "00:50:56:fe:7a:b4",
|
||||
"iface": "ens33"
|
||||
"permanent": false,
|
||||
"expires": "1182"
|
||||
},
|
||||
{
|
||||
"name": "_gateway",
|
||||
@@ -83,6 +89,8 @@ Examples:
|
||||
"hwtype": "ether",
|
||||
"hwaddress": "00:50:56:f7:4a:fc",
|
||||
"iface": "ens33"
|
||||
"permanent": false,
|
||||
"expires": "110"
|
||||
}
|
||||
]
|
||||
"""
|
||||
@@ -91,7 +99,7 @@ import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
version = '1.6'
|
||||
description = 'arp command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -123,7 +131,9 @@ def process(proc_data):
|
||||
"hwtype": string,
|
||||
"hwaddress": string,
|
||||
"flags_mask": string,
|
||||
"iface": string
|
||||
"iface": string,
|
||||
"permanent": boolean,
|
||||
"expires": integer
|
||||
}
|
||||
]
|
||||
"""
|
||||
@@ -133,6 +143,14 @@ def process(proc_data):
|
||||
if 'name' in entry and entry['name'] == '?':
|
||||
entry['name'] = None
|
||||
|
||||
int_list = ['expires']
|
||||
for key in int_list:
|
||||
if key in entry:
|
||||
try:
|
||||
entry[key] = int(entry[key])
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
@@ -153,58 +171,65 @@ def parse(data, raw=False, quiet=False):
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
cleandata = data.splitlines()
|
||||
raw_output = []
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
# remove final Entries row if -v was used
|
||||
if cleandata[-1].find('Entries:') == 0:
|
||||
cleandata.pop(-1)
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# detect if osx style was used
|
||||
if cleandata[0].find(' ifscope ') != -1:
|
||||
raw_output = []
|
||||
for line in cleandata:
|
||||
line = line.split()
|
||||
output_line = {}
|
||||
output_line['name'] = line[0]
|
||||
output_line['address'] = line[1].lstrip('(').rstrip(')')
|
||||
output_line['hwtype'] = line[-1].lstrip('[').rstrip(']')
|
||||
output_line['hwaddress'] = line[3]
|
||||
output_line['iface'] = line[5]
|
||||
raw_output.append(output_line)
|
||||
# remove final Entries row if -v was used
|
||||
if cleandata[-1].startswith('Entries:'):
|
||||
cleandata.pop(-1)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
# detect if freebsd/osx style was used
|
||||
if cleandata[0][-1] == ']':
|
||||
for line in cleandata:
|
||||
splitline = line.split()
|
||||
output_line = {
|
||||
'name': splitline[0],
|
||||
'address': splitline[1].lstrip('(').rstrip(')'),
|
||||
'hwtype': splitline[-1].lstrip('[').rstrip(']'),
|
||||
'hwaddress': splitline[3],
|
||||
'iface': splitline[5]
|
||||
}
|
||||
|
||||
if 'permanent' in splitline:
|
||||
output_line['permanent'] = True
|
||||
else:
|
||||
output_line['permanent'] = False
|
||||
|
||||
if 'expires' in splitline:
|
||||
output_line['expires'] = splitline[-3]
|
||||
|
||||
raw_output.append(output_line)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
|
||||
# detect if linux style was used
|
||||
elif cleandata[0].startswith('Address'):
|
||||
|
||||
# fix header row to change Flags Mask to flags_mask
|
||||
cleandata[0] = cleandata[0].replace('Flags Mask', 'flags_mask')
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
|
||||
# otherwise, try bsd style
|
||||
else:
|
||||
return process(raw_output)
|
||||
for line in cleandata:
|
||||
line = line.split()
|
||||
output_line = {
|
||||
'name': line[0],
|
||||
'address': line[1].lstrip('(').rstrip(')'),
|
||||
'hwtype': line[4].lstrip('[').rstrip(']'),
|
||||
'hwaddress': line[3],
|
||||
'iface': line[6],
|
||||
}
|
||||
raw_output.append(output_line)
|
||||
|
||||
# detect if linux style was used
|
||||
elif cleandata[0].find('Address') == 0:
|
||||
|
||||
# fix header row to change Flags Mask to flags_mask
|
||||
cleandata[0] = cleandata[0].replace('Flags Mask', 'flags_mask')
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
|
||||
# otherwise, try bsd style
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
raw_output = []
|
||||
for line in cleandata:
|
||||
line = line.split()
|
||||
output_line = {}
|
||||
output_line['name'] = line[0]
|
||||
output_line['address'] = line[1].lstrip('(').rstrip(')')
|
||||
output_line['hwtype'] = line[4].lstrip('[').rstrip(']')
|
||||
output_line['hwaddress'] = line[3]
|
||||
output_line['iface'] = line[6]
|
||||
raw_output.append(output_line)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
return process(raw_output)
|
||||
|
||||
219
jc/parsers/blkid.py
Normal file
219
jc/parsers/blkid.py
Normal file
@@ -0,0 +1,219 @@
|
||||
"""jc - JSON CLI output utility blkid Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --blkid as the first argument if the piped input is coming from blkid
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
|
||||
Examples:
|
||||
|
||||
$ blkid | jc --blkid -p
|
||||
[
|
||||
{
|
||||
"device": "/dev/sda1",
|
||||
"uuid": "05d927ab-5875-49e4-ada1-7f46cb32c932",
|
||||
"type": "xfs"
|
||||
},
|
||||
{
|
||||
"device": "/dev/sda2",
|
||||
"uuid": "3klkIj-w1kk-DkJi-0XBJ-y3i7-i2Ac-vHqWBM",
|
||||
"type": "LVM2_member"
|
||||
},
|
||||
{
|
||||
"device": "/dev/mapper/centos-root",
|
||||
"uuid": "07d718ff-950c-4e5b-98f0-42a1147c77d9",
|
||||
"type": "xfs"
|
||||
},
|
||||
{
|
||||
"device": "/dev/mapper/centos-swap",
|
||||
"uuid": "615eb89a-bcbf-46fd-80e3-c483ff5c931f",
|
||||
"type": "swap"
|
||||
}
|
||||
]
|
||||
|
||||
$ sudo blkid -o udev -ip /dev/sda2 | jc --blkid -p
|
||||
[
|
||||
{
|
||||
"id_fs_uuid": "3klkIj-w1kk-DkJi-0XBJ-y3i7-i2Ac-vHqWBM",
|
||||
"id_fs_uuid_enc": "3klkIj-w1kk-DkJi-0XBJ-y3i7-i2Ac-vHqWBM",
|
||||
"id_fs_version": "LVM2\\x20001",
|
||||
"id_fs_type": "LVM2_member",
|
||||
"id_fs_usage": "raid",
|
||||
"id_iolimit_minimum_io_size": 512,
|
||||
"id_iolimit_physical_sector_size": 512,
|
||||
"id_iolimit_logical_sector_size": 512,
|
||||
"id_part_entry_scheme": "dos",
|
||||
"id_part_entry_type": "0x8e",
|
||||
"id_part_entry_number": 2,
|
||||
"id_part_entry_offset": 2099200,
|
||||
"id_part_entry_size": 39843840,
|
||||
"id_part_entry_disk": "8:0"
|
||||
}
|
||||
]
|
||||
|
||||
$ sudo blkid -ip /dev/sda1 | jc --blkid -p -r
|
||||
[
|
||||
{
|
||||
"devname": "/dev/sda1",
|
||||
"uuid": "05d927bb-5875-49e3-ada1-7f46cb31c932",
|
||||
"type": "xfs",
|
||||
"usage": "filesystem",
|
||||
"minimum_io_size": "512",
|
||||
"physical_sector_size": "512",
|
||||
"logical_sector_size": "512",
|
||||
"part_entry_scheme": "dos",
|
||||
"part_entry_type": "0x83",
|
||||
"part_entry_flags": "0x80",
|
||||
"part_entry_number": "1",
|
||||
"part_entry_offset": "2048",
|
||||
"part_entry_size": "2097152",
|
||||
"part_entry_disk": "8:0"
|
||||
}
|
||||
]
|
||||
"""
|
||||
import shlex
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.2'
|
||||
description = 'blkid command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
# details = 'enter any other details here'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux']
|
||||
magic_commands = ['blkid']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"device": string,
|
||||
"uuid": string,
|
||||
"type": string,
|
||||
"usage": string,
|
||||
"part_entry_scheme": string,
|
||||
"part_entry_type": string,
|
||||
"part_entry_flags": string,
|
||||
"part_entry_number": integer,
|
||||
"part_entry_offset": integer,
|
||||
"part_entry_size": integer,
|
||||
"part_entry_disk": string,
|
||||
"id_fs_uuid": string,
|
||||
"id_fs_uuid_enc": string,
|
||||
"id_fs_version": string,
|
||||
"id_fs_type": string,
|
||||
"id_fs_usage": string,
|
||||
"id_part_entry_scheme": string,
|
||||
"id_part_entry_type": string,
|
||||
"id_part_entry_flags": string,
|
||||
"id_part_entry_number": integer,
|
||||
"id_part_entry_offset": integer,
|
||||
"id_part_entry_size": integer,
|
||||
"id_iolimit_minimum_io_size": integer,
|
||||
"id_iolimit_physical_sector_size": integer,
|
||||
"id_iolimit_logical_sector_size": integer,
|
||||
"id_part_entry_disk": string,
|
||||
"minimum_io_size": integer,
|
||||
"physical_sector_size": integer,
|
||||
"logical_sector_size": integer
|
||||
}
|
||||
]
|
||||
"""
|
||||
for entry in proc_data:
|
||||
if 'devname' in entry:
|
||||
entry['device'] = entry.pop('devname')
|
||||
|
||||
int_list = ['part_entry_number', 'part_entry_offset', 'part_entry_size', 'id_part_entry_number',
|
||||
'id_part_entry_offset', 'id_part_entry_size', 'minimum_io_size', 'physical_sector_size',
|
||||
'logical_sector_size', 'id_iolimit_minimum_io_size', 'id_iolimit_physical_sector_size',
|
||||
'id_iolimit_logical_sector_size']
|
||||
for key in int_list:
|
||||
if key in entry:
|
||||
try:
|
||||
entry[key] = int(entry[key])
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# if the first field is a device, use normal parsing:
|
||||
if data.split(maxsplit=1)[0][-1] == ':':
|
||||
linedata = data.splitlines()
|
||||
|
||||
for line in linedata:
|
||||
output_line = {}
|
||||
entries = shlex.split(line)
|
||||
output_line['device'] = entries.pop(0)[:-1]
|
||||
|
||||
for entry in entries:
|
||||
key = entry.split('=', maxsplit=1)[0].lower()
|
||||
value = entry.split('=', maxsplit=1)[1]
|
||||
output_line[key] = value
|
||||
|
||||
raw_output.append(output_line)
|
||||
|
||||
# else use key/value per line parsing
|
||||
else:
|
||||
linedata = data.splitlines()
|
||||
output_line = {}
|
||||
for line in linedata:
|
||||
if line == '':
|
||||
if output_line:
|
||||
raw_output.append(output_line)
|
||||
output_line = {}
|
||||
continue
|
||||
continue
|
||||
|
||||
key = line.split('=', maxsplit=1)[0].lower()
|
||||
value = line.split('=', maxsplit=1)[1]
|
||||
output_line[key] = value
|
||||
|
||||
if output_line:
|
||||
raw_output.append(output_line)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
@@ -132,7 +132,7 @@ import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
version = '1.4'
|
||||
description = 'crontab command and file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -225,44 +225,46 @@ def parse(data, raw=False, quiet=False):
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
|
||||
# Clear any commented lines
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if line.strip().find('#') == 0:
|
||||
cleandata.pop(i)
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# Pop any variable assignment lines
|
||||
cron_var = []
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if line.find('=') != -1:
|
||||
var_line = cleandata.pop(i)
|
||||
var_name = var_line.split('=', maxsplit=1)[0].strip()
|
||||
var_value = var_line.split('=', maxsplit=1)[1].strip()
|
||||
cron_var.append({'name': var_name,
|
||||
'value': var_value})
|
||||
# Clear any commented lines
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if line.strip().startswith('#'):
|
||||
cleandata.pop(i)
|
||||
|
||||
raw_output['variables'] = cron_var
|
||||
# Pop any variable assignment lines
|
||||
cron_var = []
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if '=' in line:
|
||||
var_line = cleandata.pop(i)
|
||||
var_name = var_line.split('=', maxsplit=1)[0].strip()
|
||||
var_value = var_line.split('=', maxsplit=1)[1].strip()
|
||||
cron_var.append({'name': var_name,
|
||||
'value': var_value})
|
||||
|
||||
# Pop any shortcut lines
|
||||
shortcut_list = []
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if line.strip().startswith('@'):
|
||||
shortcut_line = cleandata.pop(i)
|
||||
occurrence = shortcut_line.split(maxsplit=1)[0].strip().lstrip('@')
|
||||
cmd = shortcut_line.split(maxsplit=1)[1].strip()
|
||||
shortcut_list.append({'occurrence': occurrence,
|
||||
'command': cmd})
|
||||
raw_output['variables'] = cron_var
|
||||
|
||||
# Add header row for parsing
|
||||
cleandata[:0] = ['minute hour day_of_month month day_of_week command']
|
||||
# Pop any shortcut lines
|
||||
shortcut_list = []
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if line.strip().startswith('@'):
|
||||
shortcut_line = cleandata.pop(i)
|
||||
occurrence = shortcut_line.split(maxsplit=1)[0].strip().lstrip('@')
|
||||
cmd = shortcut_line.split(maxsplit=1)[1].strip()
|
||||
shortcut_list.append({'occurrence': occurrence,
|
||||
'command': cmd})
|
||||
|
||||
if len(cleandata) > 1:
|
||||
cron_list = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
# Add header row for parsing
|
||||
cleandata[:0] = ['minute hour day_of_month month day_of_week command']
|
||||
|
||||
raw_output['schedule'] = cron_list
|
||||
if len(cleandata) > 1:
|
||||
cron_list = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
|
||||
# Add shortcut entries back in
|
||||
for item in shortcut_list:
|
||||
raw_output['schedule'].append(item)
|
||||
raw_output['schedule'] = cron_list
|
||||
|
||||
# Add shortcut entries back in
|
||||
for item in shortcut_list:
|
||||
raw_output['schedule'].append(item)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -133,7 +133,7 @@ import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.3'
|
||||
description = 'crontab file parser with user support'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -226,46 +226,48 @@ def parse(data, raw=False, quiet=False):
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
|
||||
# Clear any commented lines
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if line.strip().find('#') == 0:
|
||||
cleandata.pop(i)
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# Pop any variable assignment lines
|
||||
cron_var = []
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if line.find('=') != -1:
|
||||
var_line = cleandata.pop(i)
|
||||
var_name = var_line.split('=', maxsplit=1)[0].strip()
|
||||
var_value = var_line.split('=', maxsplit=1)[1].strip()
|
||||
cron_var.append({'name': var_name,
|
||||
'value': var_value})
|
||||
# Clear any commented lines
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if line.strip().startswith('#'):
|
||||
cleandata.pop(i)
|
||||
|
||||
raw_output['variables'] = cron_var
|
||||
# Pop any variable assignment lines
|
||||
cron_var = []
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if '=' in line:
|
||||
var_line = cleandata.pop(i)
|
||||
var_name = var_line.split('=', maxsplit=1)[0].strip()
|
||||
var_value = var_line.split('=', maxsplit=1)[1].strip()
|
||||
cron_var.append({'name': var_name,
|
||||
'value': var_value})
|
||||
|
||||
# Pop any shortcut lines
|
||||
shortcut_list = []
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if line.strip().startswith('@'):
|
||||
shortcut_line = cleandata.pop(i)
|
||||
occurrence = shortcut_line.split(maxsplit=1)[0].strip().lstrip('@')
|
||||
usr = shortcut_line.split(maxsplit=2)[1].strip()
|
||||
cmd = shortcut_line.split(maxsplit=2)[2].strip()
|
||||
shortcut_list.append({'occurrence': occurrence,
|
||||
'user': usr,
|
||||
'command': cmd})
|
||||
raw_output['variables'] = cron_var
|
||||
|
||||
# Add header row for parsing
|
||||
cleandata[:0] = ['minute hour day_of_month month day_of_week user command']
|
||||
# Pop any shortcut lines
|
||||
shortcut_list = []
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if line.strip().startswith('@'):
|
||||
shortcut_line = cleandata.pop(i)
|
||||
occurrence = shortcut_line.split(maxsplit=1)[0].strip().lstrip('@')
|
||||
usr = shortcut_line.split(maxsplit=2)[1].strip()
|
||||
cmd = shortcut_line.split(maxsplit=2)[2].strip()
|
||||
shortcut_list.append({'occurrence': occurrence,
|
||||
'user': usr,
|
||||
'command': cmd})
|
||||
|
||||
if len(cleandata) > 1:
|
||||
cron_list = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
# Add header row for parsing
|
||||
cleandata[:0] = ['minute hour day_of_month month day_of_week user command']
|
||||
|
||||
raw_output['schedule'] = cron_list
|
||||
if len(cleandata) > 1:
|
||||
cron_list = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
|
||||
# Add shortcut entries back in
|
||||
for item in shortcut_list:
|
||||
raw_output['schedule'].append(item)
|
||||
raw_output['schedule'] = cron_list
|
||||
|
||||
# Add shortcut entries back in
|
||||
for item in shortcut_list:
|
||||
raw_output['schedule'].append(item)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
142
jc/parsers/csv.py
Normal file
142
jc/parsers/csv.py
Normal file
@@ -0,0 +1,142 @@
|
||||
"""jc - JSON CLI output utility csv Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --csv as the first argument if the piped input is coming from a csv file.
|
||||
the csv parser will attempt to automatically detect the delimiter character.
|
||||
if the delimiter cannot be detected it will default to comma.
|
||||
the first row of the file must be a header row.
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat homes.csv
|
||||
"Sell", "List", "Living", "Rooms", "Beds", "Baths", "Age", "Acres", "Taxes"
|
||||
142, 160, 28, 10, 5, 3, 60, 0.28, 3167
|
||||
175, 180, 18, 8, 4, 1, 12, 0.43, 4033
|
||||
129, 132, 13, 6, 3, 1, 41, 0.33, 1471
|
||||
...
|
||||
|
||||
$ cat homes.csv | jc --csv -p
|
||||
[
|
||||
{
|
||||
"Sell": "142",
|
||||
"List": "160",
|
||||
"Living": "28",
|
||||
"Rooms": "10",
|
||||
"Beds": "5",
|
||||
"Baths": "3",
|
||||
"Age": "60",
|
||||
"Acres": "0.28",
|
||||
"Taxes": "3167"
|
||||
},
|
||||
{
|
||||
"Sell": "175",
|
||||
"List": "180",
|
||||
"Living": "18",
|
||||
"Rooms": "8",
|
||||
"Beds": "4",
|
||||
"Baths": "1",
|
||||
"Age": "12",
|
||||
"Acres": "0.43",
|
||||
"Taxes": "4033"
|
||||
},
|
||||
{
|
||||
"Sell": "129",
|
||||
"List": "132",
|
||||
"Living": "13",
|
||||
"Rooms": "6",
|
||||
"Beds": "3",
|
||||
"Baths": "1",
|
||||
"Age": "41",
|
||||
"Acres": "0.33",
|
||||
"Taxes": "1471"
|
||||
},
|
||||
...
|
||||
]
|
||||
"""
|
||||
import jc.utils
|
||||
import csv
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
description = 'CSV file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
details = 'Using the python standard csv library'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Each dictionary represents a row in the csv file:
|
||||
|
||||
[
|
||||
{
|
||||
csv file converted to a Dictionary
|
||||
https://docs.python.org/3/library/csv.html
|
||||
}
|
||||
]
|
||||
"""
|
||||
|
||||
# No further processing
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
cleandata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
dialect = None
|
||||
try:
|
||||
dialect = csv.Sniffer().sniff(data[:1024])
|
||||
except Exception:
|
||||
pass
|
||||
|
||||
reader = csv.DictReader(cleandata, dialect=dialect)
|
||||
|
||||
for row in reader:
|
||||
raw_output.append(row)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
@@ -6,7 +6,7 @@ Usage:
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin'
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
@@ -73,13 +73,13 @@ import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
version = '1.5'
|
||||
description = 'df command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'darwin']
|
||||
compatible = ['linux', 'darwin', 'freebsd']
|
||||
magic_commands = ['df']
|
||||
|
||||
|
||||
@@ -135,7 +135,7 @@ def process(proc_data):
|
||||
|
||||
# change any entry for key with '_blocks' in the name to int
|
||||
for k in entry:
|
||||
if str(k).find('_blocks') != -1:
|
||||
if '_blocks' in str(k):
|
||||
try:
|
||||
blocks_int = int(entry[k])
|
||||
entry[k] = blocks_int
|
||||
@@ -184,14 +184,17 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
cleandata = data.splitlines()
|
||||
raw_output = []
|
||||
|
||||
# fix headers
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
cleandata[0] = cleandata[0].replace('-', '_')
|
||||
cleandata[0] = cleandata[0].replace('mounted on', 'mounted_on')
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# parse the data
|
||||
raw_output = jc.parsers.universal.sparse_table_parse(cleandata)
|
||||
# fix headers
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
cleandata[0] = cleandata[0].replace('-', '_')
|
||||
cleandata[0] = cleandata[0].replace('mounted on', 'mounted_on')
|
||||
|
||||
# parse the data
|
||||
raw_output = jc.parsers.universal.sparse_table_parse(cleandata)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -324,7 +324,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.3'
|
||||
description = 'dig command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -361,6 +361,15 @@ def process(proc_data):
|
||||
"answer_num": integer,
|
||||
"authority_num": integer,
|
||||
"additional_num": integer,
|
||||
"axfr": [
|
||||
{
|
||||
"name": string,
|
||||
"class": string,
|
||||
"type": string,
|
||||
"ttl": integer,
|
||||
"data": string
|
||||
}
|
||||
],
|
||||
"question": {
|
||||
"name": string,
|
||||
"class": string,
|
||||
@@ -388,6 +397,7 @@ def process(proc_data):
|
||||
"server": string,
|
||||
"when": string,
|
||||
"rcvd": integer
|
||||
"size": string
|
||||
}
|
||||
]
|
||||
"""
|
||||
@@ -402,6 +412,14 @@ def process(proc_data):
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
if 'axfr' in entry:
|
||||
for ax in entry['axfr']:
|
||||
try:
|
||||
ttl_int = int(ax['ttl'])
|
||||
ax['ttl'] = ttl_int
|
||||
except (ValueError):
|
||||
ax['ttl'] = None
|
||||
|
||||
if 'answer' in entry:
|
||||
for ans in entry['answer']:
|
||||
try:
|
||||
@@ -508,6 +526,25 @@ def parse_answer(answer):
|
||||
'data': answer_data}
|
||||
|
||||
|
||||
def parse_axfr(axfr):
|
||||
# ; <<>> DiG 9.11.14-3-Debian <<>> @81.4.108.41 axfr zonetransfer.me +nocookie
|
||||
# ; (1 server found)
|
||||
# ;; global options: +cmd
|
||||
# zonetransfer.me. 7200 IN A 5.196.105.14
|
||||
axfr = axfr.split(maxsplit=4)
|
||||
axfr_name = axfr[0]
|
||||
axfr_ttl = axfr[1]
|
||||
axfr_class = axfr[2]
|
||||
axfr_type = axfr[3]
|
||||
axfr_data = axfr[4]
|
||||
|
||||
return {'name': axfr_name,
|
||||
'ttl': axfr_ttl,
|
||||
'class': axfr_class,
|
||||
'type': axfr_type,
|
||||
'data': axfr_data}
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
@@ -534,80 +571,106 @@ def parse(data, raw=False, quiet=False):
|
||||
question = False
|
||||
authority = False
|
||||
answer = False
|
||||
axfr = False
|
||||
|
||||
output_entry = {}
|
||||
for line in cleandata:
|
||||
|
||||
if line.find(';; ->>HEADER<<-') == 0:
|
||||
output_entry = {}
|
||||
output_entry.update(parse_header(line))
|
||||
continue
|
||||
if jc.utils.has_data(data):
|
||||
for line in cleandata:
|
||||
|
||||
if line.find(';; flags:') == 0:
|
||||
output_entry.update(parse_flags_line(line))
|
||||
continue
|
||||
if line.startswith('; <<>> ') and ' axfr ' in line.lower():
|
||||
question = False
|
||||
authority = False
|
||||
answer = False
|
||||
axfr = True
|
||||
axfr_list = []
|
||||
continue
|
||||
|
||||
if line.find(';; QUESTION SECTION:') == 0:
|
||||
question = True
|
||||
authority = False
|
||||
answer = False
|
||||
continue
|
||||
if ';' not in line and axfr:
|
||||
axfr_list.append(parse_axfr(line))
|
||||
output_entry.update({'axfr': axfr_list})
|
||||
continue
|
||||
|
||||
if question:
|
||||
output_entry['question'] = parse_question(line)
|
||||
question = False
|
||||
authority = False
|
||||
answer = False
|
||||
continue
|
||||
if line.startswith(';; ->>HEADER<<-'):
|
||||
output_entry = {}
|
||||
output_entry.update(parse_header(line))
|
||||
continue
|
||||
|
||||
if line.find(';; AUTHORITY SECTION:') == 0:
|
||||
question = False
|
||||
authority = True
|
||||
answer = False
|
||||
authority_list = []
|
||||
continue
|
||||
if line.startswith(';; flags:'):
|
||||
output_entry.update(parse_flags_line(line))
|
||||
continue
|
||||
|
||||
if line.find(';') == -1 and authority:
|
||||
authority_list.append(parse_authority(line))
|
||||
output_entry.update({'authority': authority_list})
|
||||
continue
|
||||
if line.startswith(';; QUESTION SECTION:'):
|
||||
question = True
|
||||
authority = False
|
||||
answer = False
|
||||
axfr = False
|
||||
continue
|
||||
|
||||
if line.find(';; ANSWER SECTION:') == 0:
|
||||
question = False
|
||||
authority = False
|
||||
answer = True
|
||||
answer_list = []
|
||||
continue
|
||||
if question:
|
||||
output_entry['question'] = parse_question(line)
|
||||
question = False
|
||||
authority = False
|
||||
answer = False
|
||||
axfr = False
|
||||
continue
|
||||
|
||||
if line.find(';') == -1 and answer:
|
||||
answer_list.append(parse_answer(line))
|
||||
output_entry.update({'answer': answer_list})
|
||||
continue
|
||||
if line.startswith(';; AUTHORITY SECTION:'):
|
||||
question = False
|
||||
authority = True
|
||||
answer = False
|
||||
axfr = False
|
||||
authority_list = []
|
||||
continue
|
||||
|
||||
# footer consists of 4 lines
|
||||
# footer line 1
|
||||
if line.find(';; Query time:') == 0:
|
||||
output_entry.update({'query_time': line.split(':')[1].lstrip()})
|
||||
continue
|
||||
if ';' not in line and authority:
|
||||
authority_list.append(parse_authority(line))
|
||||
output_entry.update({'authority': authority_list})
|
||||
continue
|
||||
|
||||
# footer line 2
|
||||
if line.find(';; SERVER:') == 0:
|
||||
output_entry.update({'server': line.split(':')[1].lstrip()})
|
||||
continue
|
||||
if line.startswith(';; ANSWER SECTION:'):
|
||||
question = False
|
||||
authority = False
|
||||
answer = True
|
||||
axfr = False
|
||||
answer_list = []
|
||||
continue
|
||||
|
||||
# footer line 3
|
||||
if line.find(';; WHEN:') == 0:
|
||||
output_entry.update({'when': line.split(':', maxsplit=1)[1].lstrip()})
|
||||
continue
|
||||
if ';' not in line and answer:
|
||||
answer_list.append(parse_answer(line))
|
||||
output_entry.update({'answer': answer_list})
|
||||
continue
|
||||
|
||||
# footer line 4 (last line)
|
||||
if line.find(';; MSG SIZE rcvd:') == 0:
|
||||
output_entry.update({'rcvd': line.split(':')[1].lstrip()})
|
||||
# footer consists of 4 lines
|
||||
# footer line 1
|
||||
if line.startswith(';; Query time:'):
|
||||
output_entry.update({'query_time': line.split(':')[1].lstrip()})
|
||||
continue
|
||||
|
||||
if output_entry:
|
||||
raw_output.append(output_entry)
|
||||
# footer line 2
|
||||
if line.startswith(';; SERVER:'):
|
||||
output_entry.update({'server': line.split(':')[1].lstrip()})
|
||||
continue
|
||||
|
||||
# footer line 3
|
||||
if line.startswith(';; WHEN:'):
|
||||
output_entry.update({'when': line.split(':', maxsplit=1)[1].lstrip()})
|
||||
continue
|
||||
|
||||
# footer line 4 (last line)
|
||||
if line.startswith(';; MSG SIZE rcvd:'):
|
||||
output_entry.update({'rcvd': line.split(':')[1].lstrip()})
|
||||
|
||||
if output_entry:
|
||||
raw_output.append(output_entry)
|
||||
elif line.startswith(';; XFR size:'):
|
||||
output_entry.update({'size': line.split(':')[1].lstrip()})
|
||||
|
||||
if output_entry:
|
||||
raw_output.append(output_entry)
|
||||
|
||||
raw_output = list(filter(None, raw_output))
|
||||
|
||||
raw_output = list(filter(None, raw_output))
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
|
||||
341
jc/parsers/dmidecode.py
Normal file
341
jc/parsers/dmidecode.py
Normal file
@@ -0,0 +1,341 @@
|
||||
"""jc - JSON CLI output utility dmidecode Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --dmidecode as the first argument if the piped input is coming from dmidecode
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
|
||||
Examples:
|
||||
|
||||
# dmidecode | jc --dmidecode -p
|
||||
[
|
||||
{
|
||||
"handle": "0x0000",
|
||||
"type": 0,
|
||||
"bytes": 24,
|
||||
"description": "BIOS Information",
|
||||
"values": {
|
||||
"vendor": "Phoenix Technologies LTD",
|
||||
"version": "6.00",
|
||||
"release_date": "04/13/2018",
|
||||
"address": "0xEA490",
|
||||
"runtime_size": "88944 bytes",
|
||||
"rom_size": "64 kB",
|
||||
"characteristics": [
|
||||
"ISA is supported",
|
||||
"PCI is supported",
|
||||
"PC Card (PCMCIA) is supported",
|
||||
"PNP is supported",
|
||||
"APM is supported",
|
||||
"BIOS is upgradeable",
|
||||
"BIOS shadowing is allowed",
|
||||
"ESCD support is available",
|
||||
"Boot from CD is supported",
|
||||
"Selectable boot is supported",
|
||||
"EDD is supported",
|
||||
"Print screen service is supported (int 5h)",
|
||||
"8042 keyboard services are supported (int 9h)",
|
||||
"Serial services are supported (int 14h)",
|
||||
"Printer services are supported (int 17h)",
|
||||
"CGA/mono video services are supported (int 10h)",
|
||||
"ACPI is supported",
|
||||
"Smart battery is supported",
|
||||
"BIOS boot specification is supported",
|
||||
"Function key-initiated network boot is supported",
|
||||
"Targeted content distribution is supported"
|
||||
],
|
||||
"bios_revision": "4.6",
|
||||
"firmware_revision": "0.0"
|
||||
}
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
# dmidecode | jc --dmidecode -p -r
|
||||
[
|
||||
{
|
||||
"handle": "0x0000",
|
||||
"type": "0",
|
||||
"bytes": "24",
|
||||
"description": "BIOS Information",
|
||||
"values": {
|
||||
"vendor": "Phoenix Technologies LTD",
|
||||
"version": "6.00",
|
||||
"release_date": "04/13/2018",
|
||||
"address": "0xEA490",
|
||||
"runtime_size": "88944 bytes",
|
||||
"rom_size": "64 kB",
|
||||
"characteristics": [
|
||||
"ISA is supported",
|
||||
"PCI is supported",
|
||||
"PC Card (PCMCIA) is supported",
|
||||
"PNP is supported",
|
||||
"APM is supported",
|
||||
"BIOS is upgradeable",
|
||||
"BIOS shadowing is allowed",
|
||||
"ESCD support is available",
|
||||
"Boot from CD is supported",
|
||||
"Selectable boot is supported",
|
||||
"EDD is supported",
|
||||
"Print screen service is supported (int 5h)",
|
||||
"8042 keyboard services are supported (int 9h)",
|
||||
"Serial services are supported (int 14h)",
|
||||
"Printer services are supported (int 17h)",
|
||||
"CGA/mono video services are supported (int 10h)",
|
||||
"ACPI is supported",
|
||||
"Smart battery is supported",
|
||||
"BIOS boot specification is supported",
|
||||
"Function key-initiated network boot is supported",
|
||||
"Targeted content distribution is supported"
|
||||
],
|
||||
"bios_revision": "4.6",
|
||||
"firmware_revision": "0.0"
|
||||
}
|
||||
},
|
||||
...
|
||||
]
|
||||
"""
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
description = 'dmidecode command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
# details = 'enter any other details here'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux']
|
||||
magic_commands = ['dmidecode']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"handle": string,
|
||||
"type": integer,
|
||||
"bytes": integer,
|
||||
"description": string,
|
||||
"values": { (null if empty)
|
||||
"lowercase_no_spaces_keys": string,
|
||||
"multiline_key_values": [
|
||||
string,
|
||||
]
|
||||
}
|
||||
}
|
||||
]
|
||||
"""
|
||||
for entry in proc_data:
|
||||
int_list = ['type', 'bytes']
|
||||
for key in int_list:
|
||||
if key in entry:
|
||||
try:
|
||||
key_int = int(entry[key])
|
||||
entry[key] = key_int
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
if not entry['values']:
|
||||
entry['values'] = None
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
item_header = False
|
||||
item_values = False
|
||||
value_list = False
|
||||
|
||||
item = None
|
||||
header = None
|
||||
key = None
|
||||
val = None
|
||||
attribute = None
|
||||
values = None
|
||||
key_data = None
|
||||
|
||||
raw_output = []
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
data = data.splitlines()
|
||||
|
||||
# remove header rows
|
||||
for row in data.copy():
|
||||
if row:
|
||||
data.pop(0)
|
||||
else:
|
||||
break
|
||||
|
||||
# main parsing loop
|
||||
for line in data:
|
||||
# new item
|
||||
if not line:
|
||||
item_header = True
|
||||
item_values = False
|
||||
value_list = False
|
||||
|
||||
if item:
|
||||
if values:
|
||||
item['values'][attribute] = values
|
||||
if key_data:
|
||||
item['values'][f'{key}_data'] = key_data
|
||||
raw_output.append(item)
|
||||
|
||||
item = {}
|
||||
header = None
|
||||
key = None
|
||||
val = None
|
||||
attribute = None
|
||||
values = []
|
||||
key_data = []
|
||||
continue
|
||||
|
||||
# header
|
||||
if line.startswith('Handle ') and line.endswith('bytes'):
|
||||
|
||||
# Handle 0x0000, DMI type 0, 24 bytes
|
||||
header = line.replace(',', ' ').split()
|
||||
item = {
|
||||
'handle': header[1],
|
||||
'type': header[4],
|
||||
'bytes': header[5]
|
||||
}
|
||||
continue
|
||||
|
||||
# description
|
||||
if item_header:
|
||||
item_header = False
|
||||
item_values = True
|
||||
value_list = False
|
||||
|
||||
item['description'] = line
|
||||
item['values'] = {}
|
||||
continue
|
||||
|
||||
# new item if multiple descriptions in handle
|
||||
if not item_header and not line.startswith('\t'):
|
||||
item_header = False
|
||||
item_values = True
|
||||
value_list = False
|
||||
|
||||
if item:
|
||||
if values:
|
||||
item['values'][attribute] = values
|
||||
if key_data:
|
||||
item['values'][f'{key}_data'] = key_data
|
||||
raw_output.append(item)
|
||||
|
||||
item = {
|
||||
'handle': header[1],
|
||||
'type': header[4],
|
||||
'bytes': header[5],
|
||||
'description': line,
|
||||
'values': {}
|
||||
}
|
||||
|
||||
key = None
|
||||
val = None
|
||||
attribute = None
|
||||
values = []
|
||||
key_data = []
|
||||
continue
|
||||
|
||||
# keys and values
|
||||
if item_values \
|
||||
and len(line.split(':', maxsplit=1)) == 2 \
|
||||
and line.startswith('\t') \
|
||||
and not line.startswith('\t\t') \
|
||||
and not line.strip().endswith(':'):
|
||||
item_header = False
|
||||
item_values = True
|
||||
value_list = False
|
||||
|
||||
if values:
|
||||
item['values'][attribute] = values
|
||||
values = []
|
||||
if key_data:
|
||||
item['values'][f'{key}_data'] = key_data
|
||||
key_data = []
|
||||
|
||||
key = line.split(':', maxsplit=1)[0].strip().lower().replace(' ', '_')
|
||||
val = line.split(':', maxsplit=1)[1].strip()
|
||||
item['values'].update({key: val})
|
||||
continue
|
||||
|
||||
# multi-line key
|
||||
if item_values \
|
||||
and line.startswith('\t') \
|
||||
and not line.startswith('\t\t') \
|
||||
and line.strip().endswith(':'):
|
||||
item_header = False
|
||||
item_values = True
|
||||
value_list = True
|
||||
|
||||
if values:
|
||||
item['values'][attribute] = values
|
||||
values = []
|
||||
if key_data:
|
||||
item['values'][f'{key}_data'] = key_data
|
||||
key_data = []
|
||||
|
||||
attribute = line[:-1].strip().lower().replace(' ', '_')
|
||||
values = []
|
||||
continue
|
||||
|
||||
# multi-line values
|
||||
if value_list \
|
||||
and line.startswith('\t\t'):
|
||||
values.append(line.strip())
|
||||
continue
|
||||
|
||||
# data for hybrid multi-line objects
|
||||
if item_values \
|
||||
and not value_list \
|
||||
and line.startswith('\t\t'):
|
||||
if f'{key}_data' not in item['values']:
|
||||
item['values'][f'{key}_data'] = []
|
||||
key_data.append(line.strip())
|
||||
continue
|
||||
|
||||
if item:
|
||||
raw_output.append(item)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
@@ -73,7 +73,7 @@ import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
version = '1.2'
|
||||
description = 'du command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -137,12 +137,12 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
cleandata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
if cleandata:
|
||||
cleandata.insert(0, 'size name')
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
version = '1.2'
|
||||
description = 'env command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -116,12 +116,10 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
raw_output = {}
|
||||
|
||||
linedata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, linedata))
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
if cleandata:
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for entry in cleandata:
|
||||
parsed_line = entry.split('=', maxsplit=1)
|
||||
|
||||
131
jc/parsers/file.py
Normal file
131
jc/parsers/file.py
Normal file
@@ -0,0 +1,131 @@
|
||||
"""jc - JSON CLI output utility file command Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --file as the first argument if the piped input is coming from file.
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'aix', 'freebsd', 'darwin'
|
||||
|
||||
Examples:
|
||||
|
||||
$ file * | jc --file -p
|
||||
[
|
||||
{
|
||||
"filename": "Applications",
|
||||
"type": "directory"
|
||||
},
|
||||
{
|
||||
"filename": "another file with spaces",
|
||||
"type": "empty"
|
||||
},
|
||||
{
|
||||
"filename": "argstest.py",
|
||||
"type": "Python script text executable, ASCII text"
|
||||
},
|
||||
{
|
||||
"filename": "blkid-p.out",
|
||||
"type": "ASCII text"
|
||||
},
|
||||
{
|
||||
"filename": "blkid-pi.out",
|
||||
"type": "ASCII text, with very long lines"
|
||||
},
|
||||
{
|
||||
"filename": "cd_catalog.xml",
|
||||
"type": "XML 1.0 document text, ASCII text, with CRLF line terminators"
|
||||
},
|
||||
{
|
||||
"filename": "centosserial.sh",
|
||||
"type": "Bourne-Again shell script text executable, UTF-8 Unicode text"
|
||||
},
|
||||
...
|
||||
]
|
||||
"""
|
||||
import jc.utils
|
||||
import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.2'
|
||||
description = 'file command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'aix', 'freebsd', 'darwin']
|
||||
magic_commands = ['file']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"filename": string,
|
||||
"type ": string
|
||||
}
|
||||
]
|
||||
"""
|
||||
# No further processing
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
|
||||
warned = False
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for line in filter(None, data.splitlines()):
|
||||
linedata = line.rsplit(': ', maxsplit=1)
|
||||
|
||||
try:
|
||||
filename = linedata[0].strip()
|
||||
filetype = linedata[1].strip()
|
||||
|
||||
raw_output.append(
|
||||
{
|
||||
'filename': filename,
|
||||
'type': filetype
|
||||
}
|
||||
)
|
||||
except IndexError:
|
||||
if not warned:
|
||||
jc.utils.warning_message('Filenames with newline characters detected. Some filenames may be truncated.')
|
||||
warned = True
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
@@ -77,14 +77,12 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
cleandata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
if cleandata:
|
||||
# parse the content
|
||||
pass
|
||||
for line in filter(None, data.splitlines()):
|
||||
# parse the content
|
||||
pass
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -53,7 +53,7 @@ import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.2'
|
||||
description = 'free command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -122,14 +122,18 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
cleandata = data.splitlines()
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
cleandata[0] = cleandata[0].replace('buff/cache', 'buff_cache')
|
||||
cleandata[0] = 'type ' + cleandata[0]
|
||||
raw_output = []
|
||||
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for entry in raw_output:
|
||||
entry['type'] = entry['type'].rstrip(':')
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
cleandata[0] = cleandata[0].replace('buff/cache', 'buff_cache')
|
||||
cleandata[0] = 'type ' + cleandata[0]
|
||||
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
|
||||
for entry in raw_output:
|
||||
entry['type'] = entry['type'].rstrip(':')
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -6,7 +6,7 @@ Usage:
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
'linux', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
@@ -70,13 +70,13 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.3'
|
||||
description = 'fstab file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux']
|
||||
compatible = ['linux', 'freebsd']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
@@ -141,11 +141,12 @@ def parse(data, raw=False, quiet=False):
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
|
||||
if cleandata:
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for line in cleandata:
|
||||
output_line = {}
|
||||
# ignore commented lines
|
||||
if line.strip().find('#') == 0:
|
||||
if line.strip().startswith('#'):
|
||||
continue
|
||||
|
||||
line_list = line.split(maxsplit=6)
|
||||
|
||||
191
jc/parsers/group.py
Normal file
191
jc/parsers/group.py
Normal file
@@ -0,0 +1,191 @@
|
||||
"""jc - JSON CLI output utility /etc/group file Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --group as the first argument if the piped input is coming from /etc/group
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /etc/group | jc --group -p
|
||||
[
|
||||
{
|
||||
"group_name": "nobody",
|
||||
"password": "*",
|
||||
"gid": -2,
|
||||
"members": []
|
||||
},
|
||||
{
|
||||
"group_name": "nogroup",
|
||||
"password": "*",
|
||||
"gid": -1,
|
||||
"members": []
|
||||
},
|
||||
{
|
||||
"group_name": "wheel",
|
||||
"password": "*",
|
||||
"gid": 0,
|
||||
"members": [
|
||||
"root"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "certusers",
|
||||
"password": "*",
|
||||
"gid": 29,
|
||||
"members": [
|
||||
"root",
|
||||
"_jabber",
|
||||
"_postfix",
|
||||
"_cyrus",
|
||||
"_calendar",
|
||||
"_dovecot"
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ cat /etc/group | jc --group -p -r
|
||||
[
|
||||
{
|
||||
"group_name": "nobody",
|
||||
"password": "*",
|
||||
"gid": "-2",
|
||||
"members": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "nogroup",
|
||||
"password": "*",
|
||||
"gid": "-1",
|
||||
"members": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "wheel",
|
||||
"password": "*",
|
||||
"gid": "0",
|
||||
"members": [
|
||||
"root"
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "certusers",
|
||||
"password": "*",
|
||||
"gid": "29",
|
||||
"members": [
|
||||
"root",
|
||||
"_jabber",
|
||||
"_postfix",
|
||||
"_cyrus",
|
||||
"_calendar",
|
||||
"_dovecot"
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
"""
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
description = '/etc/group file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
# details = 'enter any other details here'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'darwin', 'aix', 'freebsd']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"group_name": string,
|
||||
"password": string,
|
||||
"gid": integer,
|
||||
"members": [
|
||||
string
|
||||
]
|
||||
}
|
||||
]
|
||||
"""
|
||||
for entry in proc_data:
|
||||
int_list = ['gid']
|
||||
for key in int_list:
|
||||
if key in entry:
|
||||
try:
|
||||
key_int = int(entry[key])
|
||||
entry[key] = key_int
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
if entry['members'] == ['']:
|
||||
entry['members'] = []
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
cleandata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for entry in cleandata:
|
||||
if entry.startswith('#'):
|
||||
continue
|
||||
|
||||
output_line = {}
|
||||
fields = entry.split(':')
|
||||
|
||||
output_line['group_name'] = fields[0]
|
||||
output_line['password'] = fields[1]
|
||||
output_line['gid'] = fields[2]
|
||||
output_line['members'] = fields[3].split(',')
|
||||
|
||||
raw_output.append(output_line)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
153
jc/parsers/gshadow.py
Normal file
153
jc/parsers/gshadow.py
Normal file
@@ -0,0 +1,153 @@
|
||||
"""jc - JSON CLI output utility /etc/gshadow file Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --gshadow as the first argument if the piped input is coming from /etc/gshadow
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /etc/gshadow | jc --gshadow -p
|
||||
[
|
||||
{
|
||||
"group_name": "root",
|
||||
"password": "*",
|
||||
"administrators": [],
|
||||
"members": []
|
||||
},
|
||||
{
|
||||
"group_name": "adm",
|
||||
"password": "*",
|
||||
"administrators": [],
|
||||
"members": [
|
||||
"syslog",
|
||||
"joeuser"
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ cat /etc/gshadow | jc --gshadow -p -r
|
||||
[
|
||||
{
|
||||
"group_name": "root",
|
||||
"password": "*",
|
||||
"administrators": [
|
||||
""
|
||||
],
|
||||
"members": [
|
||||
""
|
||||
]
|
||||
},
|
||||
{
|
||||
"group_name": "adm",
|
||||
"password": "*",
|
||||
"administrators": [
|
||||
""
|
||||
],
|
||||
"members": [
|
||||
"syslog",
|
||||
"joeuser"
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
"""
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
description = '/etc/gshadow file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
# details = 'enter any other details here'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'aix', 'freebsd']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"group_name": string,
|
||||
"password": string,
|
||||
"administrators": [
|
||||
string
|
||||
],
|
||||
"members": [
|
||||
string
|
||||
]
|
||||
}
|
||||
]
|
||||
"""
|
||||
for entry in proc_data:
|
||||
if entry['administrators'] == ['']:
|
||||
entry['administrators'] = []
|
||||
|
||||
if entry['members'] == ['']:
|
||||
entry['members'] = []
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
cleandata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for entry in cleandata:
|
||||
if entry.startswith('#'):
|
||||
continue
|
||||
|
||||
output_line = {}
|
||||
fields = entry.split(':')
|
||||
|
||||
output_line['group_name'] = fields[0]
|
||||
output_line['password'] = fields[1]
|
||||
output_line['administrators'] = fields[2].split(',')
|
||||
output_line['members'] = fields[3].split(',')
|
||||
|
||||
raw_output.append(output_line)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
@@ -40,14 +40,15 @@ Examples:
|
||||
...
|
||||
}
|
||||
"""
|
||||
import jc
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
version = '1.3'
|
||||
description = 'history command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
details = 'Optimizations by https://github.com/philippeitis'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'darwin', 'cygwin', 'aix', 'freebsd']
|
||||
@@ -79,21 +80,11 @@ def process(proc_data):
|
||||
# rebuild output for added semantic information
|
||||
processed = []
|
||||
for k, v in proc_data.items():
|
||||
proc_line = {}
|
||||
proc_line['line'] = k
|
||||
proc_line['command'] = v
|
||||
proc_line = {
|
||||
'line': int(k) if k.isdigit() else None,
|
||||
'command': v,
|
||||
}
|
||||
processed.append(proc_line)
|
||||
|
||||
for entry in processed:
|
||||
int_list = ['line']
|
||||
for key in int_list:
|
||||
if key in entry:
|
||||
try:
|
||||
key_int = int(entry[key])
|
||||
entry[key] = key_int
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
return processed
|
||||
|
||||
|
||||
@@ -117,14 +108,13 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
raw_output = {}
|
||||
|
||||
# split lines and clear out any non-ascii chars
|
||||
linedata = data.encode('ascii', errors='ignore').decode().splitlines()
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, linedata))
|
||||
# split lines and clear out any non-ascii chars
|
||||
linedata = data.encode('ascii', errors='ignore').decode().splitlines()
|
||||
|
||||
if cleandata:
|
||||
for entry in cleandata:
|
||||
# Skip any blank lines
|
||||
for entry in filter(None, linedata):
|
||||
try:
|
||||
parsed_line = entry.split(maxsplit=1)
|
||||
raw_output[parsed_line[0]] = parsed_line[1]
|
||||
|
||||
@@ -61,7 +61,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.2'
|
||||
description = '/etc/hosts file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -117,16 +117,16 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
cleandata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
if cleandata:
|
||||
for line in cleandata:
|
||||
output_line = {}
|
||||
# ignore commented lines
|
||||
if line.strip().find('#') == 0:
|
||||
if line.strip().startswith('#'):
|
||||
continue
|
||||
|
||||
line_list = line.split(maxsplit=1)
|
||||
@@ -136,7 +136,7 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
comment_found = False
|
||||
for i, item in enumerate(hosts_list):
|
||||
if item.find('#') != -1:
|
||||
if '#' in item:
|
||||
comment_found = True
|
||||
comment_item = i
|
||||
break
|
||||
|
||||
@@ -70,7 +70,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.1'
|
||||
description = 'id command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -166,12 +166,12 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = {}
|
||||
cleandata = data.split()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
cleandata = list(filter(None, data.split()))
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
if cleandata:
|
||||
for section in cleandata:
|
||||
if section.startswith('uid'):
|
||||
uid_parsed = section.replace('(', '=').replace(')', '=')
|
||||
|
||||
@@ -141,16 +141,17 @@ Examples:
|
||||
}
|
||||
]
|
||||
"""
|
||||
import re
|
||||
from collections import namedtuple
|
||||
import jc.utils
|
||||
from ifconfigparser import IfconfigParser
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.5'
|
||||
version = '1.8'
|
||||
description = 'ifconfig command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
details = 'Using ifconfig-parser package from https://github.com/KnightWhoSayNi/ifconfig-parser'
|
||||
details = 'Using ifconfig-parser from https://github.com/KnightWhoSayNi/ifconfig-parser'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'aix', 'freebsd', 'darwin']
|
||||
@@ -160,6 +161,222 @@ class info():
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
class IfconfigParser(object):
|
||||
# Author: threeheadedknight@protonmail.com
|
||||
# Date created: 30.06.2018 17:03
|
||||
# Python Version: 3.7
|
||||
|
||||
# MIT License
|
||||
|
||||
# Copyright (c) 2018 threeheadedknight@protonmail.com
|
||||
|
||||
# Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
# of this software and associated documentation files (the "Software"), to deal
|
||||
# in the Software without restriction, including without limitation the rights
|
||||
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
# copies of the Software, and to permit persons to whom the Software is
|
||||
# furnished to do so, subject to the following conditions:
|
||||
|
||||
# The above copyright notice and this permission notice shall be included in all
|
||||
# copies or substantial portions of the Software.
|
||||
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
# SOFTWARE.
|
||||
|
||||
attributes = ['name', 'type', 'mac_addr', 'ipv4_addr', 'ipv4_bcast', 'ipv4_mask', 'ipv6_addr', 'ipv6_mask',
|
||||
'ipv6_scope', 'state', 'mtu', 'metric', 'rx_packets', 'rx_errors', 'rx_dropped', 'rx_overruns',
|
||||
'rx_frame', 'tx_packets', 'tx_errors', 'tx_dropped', 'tx_overruns', 'tx_carrier', 'tx_collisions',
|
||||
'rx_bytes', 'tx_bytes']
|
||||
|
||||
def __init__(self, console_output):
|
||||
"""
|
||||
:param console_output:
|
||||
"""
|
||||
|
||||
if isinstance(console_output, list):
|
||||
source_data = " ".join(console_output)
|
||||
else:
|
||||
source_data = console_output.replace("\n", " ")
|
||||
self.interfaces = self.parser(source_data=source_data)
|
||||
|
||||
def list_interfaces(self):
|
||||
"""
|
||||
:return:
|
||||
"""
|
||||
return sorted(self.interfaces.keys())
|
||||
|
||||
def count_interfaces(self):
|
||||
"""
|
||||
:return:
|
||||
"""
|
||||
return len(self.interfaces.keys())
|
||||
|
||||
def filter_interfaces(self, **kwargs):
|
||||
"""
|
||||
:param kwargs:
|
||||
:return:
|
||||
"""
|
||||
for attr in kwargs.keys():
|
||||
if attr not in IfconfigParser.attributes:
|
||||
raise ValueError("Attribute [{}] not supported.".format(attr))
|
||||
|
||||
filtered_interfaces = []
|
||||
for name, details in self.interfaces.items():
|
||||
|
||||
if all(getattr(details, attr) == kwargs[attr] for attr in kwargs.keys()):
|
||||
filtered_interfaces.append(name)
|
||||
|
||||
return sorted(filtered_interfaces)
|
||||
|
||||
def get_interface(self, name):
|
||||
"""
|
||||
:param name:
|
||||
:return:
|
||||
"""
|
||||
if name in self.list_interfaces():
|
||||
return self.interfaces[name]
|
||||
else:
|
||||
raise InterfaceNotFound("Interface [{}] not found.".format(name))
|
||||
|
||||
def get_interfaces(self):
|
||||
"""
|
||||
:return:
|
||||
"""
|
||||
return self.interfaces
|
||||
|
||||
def is_available(self, name):
|
||||
"""
|
||||
:param name:
|
||||
:return:
|
||||
"""
|
||||
return name in self.interfaces
|
||||
|
||||
def parser(self, source_data):
|
||||
"""
|
||||
:param source_data:
|
||||
:return:
|
||||
"""
|
||||
|
||||
# Linux syntax
|
||||
re_linux_interface = re.compile(
|
||||
r"(?P<name>[a-zA-Z0-9:._-]+)\s+Link encap:(?P<type>\S+\s?\S+)(\s+HWaddr\s+\b"
|
||||
r"(?P<mac_addr>[0-9A-Fa-f:?]+))?",
|
||||
re.I)
|
||||
re_linux_ipv4 = re.compile(
|
||||
r"inet addr:(?P<ipv4_addr>(?:[0-9]{1,3}\.){3}[0-9]{1,3})(\s+Bcast:"
|
||||
r"(?P<ipv4_bcast>(?:[0-9]{1,3}\.){3}[0-9]{1,3}))?\s+Mask:(?P<ipv4_mask>(?:[0-9]{1,3}\.){3}[0-9]{1,3})",
|
||||
re.I)
|
||||
re_linux_ipv6 = re.compile(
|
||||
r"inet6 addr:\s+(?P<ipv6_addr>\S+)/(?P<ipv6_mask>[0-9]+)\s+Scope:(?P<ipv6_scope>Link|Host)",
|
||||
re.I)
|
||||
re_linux_state = re.compile(
|
||||
r"\W+(?P<state>(?:\w+\s)+)(?:\s+)?MTU:(?P<mtu>[0-9]+)\s+Metric:(?P<metric>[0-9]+)", re.I)
|
||||
re_linux_rx = re.compile(
|
||||
r"RX packets:(?P<rx_packets>[0-9]+)\s+errors:(?P<rx_errors>[0-9]+)\s+dropped:"
|
||||
r"(?P<rx_dropped>[0-9]+)\s+overruns:(?P<rx_overruns>[0-9]+)\s+frame:(?P<rx_frame>[0-9]+)",
|
||||
re.I)
|
||||
re_linux_tx = re.compile(
|
||||
r"TX packets:(?P<tx_packets>[0-9]+)\s+errors:(?P<tx_errors>[0-9]+)\s+dropped:"
|
||||
r"(?P<tx_dropped>[0-9]+)\s+overruns:(?P<tx_overruns>[0-9]+)\s+carrier:(?P<tx_carrier>[0-9]+)",
|
||||
re.I)
|
||||
re_linux_bytes = re.compile(r"\W+RX bytes:(?P<rx_bytes>\d+)\s+\(.*\)\s+TX bytes:(?P<tx_bytes>\d+)\s+\(.*\)", re.I)
|
||||
re_linux_tx_stats = re.compile(r"collisions:(?P<tx_collisions>[0-9]+)\s+txqueuelen:[0-9]+", re.I)
|
||||
re_linux = [re_linux_interface, re_linux_ipv4, re_linux_ipv6, re_linux_state, re_linux_rx, re_linux_tx,
|
||||
re_linux_bytes, re_linux_tx_stats]
|
||||
|
||||
# OpenBSD syntax
|
||||
re_openbsd_interface = re.compile(
|
||||
r"(?P<name>[a-zA-Z0-9:._-]+):\s+flags=(?P<flags>[0-9]+)<(?P<state>\S+)?>\s+mtu\s+(?P<mtu>[0-9]+)",
|
||||
re.I)
|
||||
re_openbsd_ipv4 = re.compile(
|
||||
r"inet (?P<ipv4_addr>(?:[0-9]{1,3}\.){3}[0-9]{1,3})\s+netmask\s+"
|
||||
r"(?P<ipv4_mask>(?:[0-9]{1,3}\.){3}[0-9]{1,3})(\s+broadcast\s+"
|
||||
r"(?P<ipv4_bcast>(?:[0-9]{1,3}\.){3}[0-9]{1,3}))?",
|
||||
re.I)
|
||||
re_openbsd_ipv6 = re.compile(
|
||||
r"inet6\s+(?P<ipv6_addr>\S+)\s+prefixlen\s+(?P<ipv6_mask>[0-9]+)\s+scopeid\s+(?P<ipv6_scope>\w+x\w+)<"
|
||||
r"(?:link|host)>",
|
||||
re.I)
|
||||
re_openbsd_details = re.compile(
|
||||
r"\S+\s+(?:(?P<mac_addr>[0-9A-Fa-f:?]+)\s+)?txqueuelen\s+[0-9]+\s+\((?P<type>\S+\s?\S+)\)", re.I)
|
||||
re_openbsd_rx = re.compile(r"RX packets (?P<rx_packets>[0-9]+)\s+bytes\s+(?P<rx_bytes>\d+)\s+.*", re.I)
|
||||
re_openbsd_rx_stats = re.compile(
|
||||
r"RX errors (?P<rx_errors>[0-9]+)\s+dropped\s+(?P<rx_dropped>[0-9]+)\s+overruns\s+"
|
||||
r"(?P<rx_overruns>[0-9]+)\s+frame\s+(?P<rx_frame>[0-9]+)",
|
||||
re.I)
|
||||
re_openbsd_tx = re.compile(r"TX packets (?P<tx_packets>[0-9]+)\s+bytes\s+(?P<tx_bytes>\d+)\s+.*", re.I)
|
||||
re_openbsd_tx_stats = re.compile(
|
||||
r"TX errors (?P<tx_errors>[0-9]+)\s+dropped\s+(?P<tx_dropped>[0-9]+)\s+overruns\s+"
|
||||
r"(?P<tx_overruns>[0-9]+)\s+carrier\s+(?P<tx_carrier>[0-9]+)\s+collisions\s+(?P<tx_collisions>[0-9]+)",
|
||||
re.I)
|
||||
re_openbsd = [re_openbsd_interface, re_openbsd_ipv4, re_openbsd_ipv6, re_openbsd_details, re_openbsd_rx,
|
||||
re_openbsd_rx_stats, re_openbsd_tx, re_openbsd_tx_stats]
|
||||
|
||||
# FreeBSD syntax
|
||||
re_freebsd_interface = re.compile(
|
||||
r"(?P<name>[a-zA-Z0-9:._-]+):\s+flags=(?P<flags>[0-9]+)<(?P<state>\S+)>\s+metric\s+"
|
||||
r"(?P<metric>[0-9]+)\s+mtu\s+(?P<mtu>[0-9]+)",
|
||||
re.I)
|
||||
re_freebsd_ipv4 = re.compile(
|
||||
r"inet (?P<ipv4_addr>(?:[0-9]{1,3}\.){3}[0-9]{1,3})\s+netmask\s+(?P<ipv4_mask>0x\S+)(\s+broadcast\s+"
|
||||
r"(?P<ipv4_bcast>(?:[0-9]{1,3}\.){3}[0-9]{1,3}))?",
|
||||
re.I)
|
||||
re_freebsd_ipv6 = re.compile(r"\s?inet6\s(?P<ipv6_addr>.*)(?:\%\w+\d+)\sprefixlen\s(?P<ipv6_mask>\d+)(?:\s\w+)?\sscopeid\s(?P<ipv6_scope>\w+x\w+)", re.I)
|
||||
re_freebsd_details = re.compile(r"ether\s+(?P<mac_addr>[0-9A-Fa-f:?]+)", re.I)
|
||||
re_freebsd = [re_freebsd_interface, re_freebsd_ipv4, re_freebsd_ipv6, re_freebsd_details]
|
||||
|
||||
available_interfaces = dict()
|
||||
|
||||
for pattern in [re_linux_interface, re_openbsd_interface, re_freebsd_interface]:
|
||||
network_interfaces = re.finditer(pattern, source_data)
|
||||
positions = []
|
||||
while True:
|
||||
try:
|
||||
pos = next(network_interfaces)
|
||||
positions.append(max(pos.start() - 1, 0))
|
||||
except StopIteration:
|
||||
break
|
||||
if positions:
|
||||
positions.append(len(source_data))
|
||||
break
|
||||
|
||||
if not positions:
|
||||
return available_interfaces
|
||||
|
||||
for l, r in zip(positions, positions[1:]):
|
||||
chunk = source_data[l:r]
|
||||
_interface = dict()
|
||||
for pattern in re_linux + re_openbsd + re_freebsd:
|
||||
match = re.search(pattern, chunk.replace('\t', '\n'))
|
||||
if match:
|
||||
details = match.groupdict()
|
||||
for k, v in details.items():
|
||||
if isinstance(v, str): details[k] = v.strip()
|
||||
_interface.update(details)
|
||||
if _interface is not None:
|
||||
available_interfaces[_interface['name']] = self.update_interface_details(_interface)
|
||||
|
||||
return available_interfaces
|
||||
|
||||
@staticmethod
|
||||
def update_interface_details(interface):
|
||||
for attr in IfconfigParser.attributes:
|
||||
if attr not in interface:
|
||||
interface[attr] = None
|
||||
return namedtuple('Interface', interface.keys())(**interface)
|
||||
|
||||
|
||||
class InterfaceNotFound(Exception):
|
||||
"""
|
||||
"""
|
||||
pass
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
@@ -220,7 +437,7 @@ def process(proc_data):
|
||||
# convert OSX-style subnet mask to dotted quad
|
||||
if 'ipv4_mask' in entry:
|
||||
try:
|
||||
if entry['ipv4_mask'].find('0x') == 0:
|
||||
if entry['ipv4_mask'].startswith('0x'):
|
||||
new_mask = entry['ipv4_mask']
|
||||
new_mask = new_mask.lstrip('0x')
|
||||
new_mask = '.'.join(str(int(i, 16)) for i in [new_mask[i:i + 2] for i in range(0, len(new_mask), 2)])
|
||||
@@ -258,14 +475,16 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
raw_output = []
|
||||
|
||||
parsed = IfconfigParser(console_output=data)
|
||||
interfaces = parsed.get_interfaces()
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# convert ifconfigparser output to a dictionary
|
||||
for iface in interfaces:
|
||||
d = interfaces[iface]._asdict()
|
||||
dct = dict(d)
|
||||
raw_output.append(dct)
|
||||
parsed = IfconfigParser(console_output=data)
|
||||
interfaces = parsed.get_interfaces()
|
||||
|
||||
# convert ifconfigparser output to a dictionary
|
||||
for iface in interfaces:
|
||||
d = interfaces[iface]._asdict()
|
||||
dct = dict(d)
|
||||
raw_output.append(dct)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -2,7 +2,9 @@
|
||||
|
||||
Usage:
|
||||
|
||||
specify --ini as the first argument if the piped input is coming from an INI file
|
||||
Specify --ini as the first argument if the piped input is coming from an INI file or any
|
||||
simple key/value pair file. Delimiter can be '=' or ':'. Missing values are supported.
|
||||
Comment prefix can be '#' or ';'. Comments must be on their own line.
|
||||
|
||||
Compatibility:
|
||||
|
||||
@@ -47,8 +49,8 @@ import configparser
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
description = 'INI file parser'
|
||||
version = '1.2'
|
||||
description = 'INI file parser. Also parses files/output containing simple key/value pairs'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
details = 'Using configparser from the standard library'
|
||||
@@ -70,15 +72,33 @@ def process(proc_data):
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary representing an ini document:
|
||||
Dictionary representing an ini or simple key/value pair document:
|
||||
|
||||
{
|
||||
ini document converted to a dictionary
|
||||
see configparser standard library documentation for more details
|
||||
ini or key/value document converted to a dictionary - see configparser standard
|
||||
library documentation for more details.
|
||||
|
||||
Note: Values starting and ending with quotation marks will have the marks removed.
|
||||
If you would like to keep the quotation marks, use the -r or raw=True argument.
|
||||
}
|
||||
"""
|
||||
# remove quotation marks from beginning and end of values
|
||||
for heading in proc_data:
|
||||
# standard ini files with headers
|
||||
if isinstance(proc_data[heading], dict):
|
||||
for key, value in proc_data[heading].items():
|
||||
if value is not None and value.startswith('"') and value.endswith('"'):
|
||||
proc_data[heading][key] = value.lstrip('"').rstrip('"')
|
||||
elif value is None:
|
||||
proc_data[heading][key] = ''
|
||||
|
||||
# simple key/value files with no headers
|
||||
else:
|
||||
if proc_data[heading] is not None and proc_data[heading].startswith('"') and proc_data[heading].endswith('"'):
|
||||
proc_data[heading] = proc_data[heading].lstrip('"').rstrip('"')
|
||||
elif proc_data[heading] is None:
|
||||
proc_data[heading] = ''
|
||||
|
||||
# No further processing
|
||||
return proc_data
|
||||
|
||||
|
||||
@@ -101,10 +121,19 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
raw_output = {}
|
||||
|
||||
if data:
|
||||
ini = configparser.ConfigParser()
|
||||
ini.read_string(data)
|
||||
raw_output = {s: dict(ini.items(s)) for s in ini.sections()}
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
ini = configparser.ConfigParser(allow_no_value=True, interpolation=None)
|
||||
try:
|
||||
ini.read_string(data)
|
||||
raw_output = {s: dict(ini.items(s)) for s in ini.sections()}
|
||||
|
||||
except configparser.MissingSectionHeaderError:
|
||||
data = '[data]\n' + data
|
||||
ini.read_string(data)
|
||||
output_dict = {s: dict(ini.items(s)) for s in ini.sections()}
|
||||
for key, value in output_dict['data'].items():
|
||||
raw_output[key] = value
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -134,7 +134,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
version = '1.4'
|
||||
description = 'iptables command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -194,19 +194,19 @@ def process(proc_data):
|
||||
if 'bytes' in rule:
|
||||
multiplier = 1
|
||||
if rule['bytes'][-1] == 'K':
|
||||
multiplier = 1000
|
||||
multiplier = 10 ** 3
|
||||
rule['bytes'] = rule['bytes'].rstrip('K')
|
||||
elif rule['bytes'][-1] == 'M':
|
||||
multiplier = 1000000
|
||||
multiplier = 10 ** 6
|
||||
rule['bytes'] = rule['bytes'].rstrip('M')
|
||||
elif rule['bytes'][-1] == 'G':
|
||||
multiplier = 1000000000
|
||||
multiplier = 10 ** 9
|
||||
rule['bytes'] = rule['bytes'].rstrip('G')
|
||||
elif rule['bytes'][-1] == 'T':
|
||||
multiplier = 1000000000000
|
||||
multiplier = 10 ** 12
|
||||
rule['bytes'] = rule['bytes'].rstrip('T')
|
||||
elif rule['bytes'][-1] == 'P':
|
||||
multiplier = 1000000000000000
|
||||
multiplier = 10 ** 15
|
||||
rule['bytes'] = rule['bytes'].rstrip('P')
|
||||
|
||||
try:
|
||||
@@ -243,36 +243,39 @@ def parse(data, raw=False, quiet=False):
|
||||
chain = {}
|
||||
headers = []
|
||||
|
||||
cleandata = data.splitlines()
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for line in cleandata:
|
||||
for line in list(filter(None, data.splitlines())):
|
||||
|
||||
if line.find('Chain') == 0:
|
||||
if line.startswith('Chain'):
|
||||
if chain:
|
||||
raw_output.append(chain)
|
||||
|
||||
chain = {}
|
||||
headers = []
|
||||
|
||||
parsed_line = line.split()
|
||||
|
||||
chain['chain'] = parsed_line[1]
|
||||
chain['rules'] = []
|
||||
|
||||
continue
|
||||
|
||||
elif line.startswith('target') or line.find('pkts') == 1 or line.startswith('num'):
|
||||
headers = []
|
||||
headers = [h for h in ' '.join(line.lower().strip().split()).split() if h]
|
||||
headers.append("options")
|
||||
|
||||
continue
|
||||
|
||||
else:
|
||||
rule = line.split(maxsplit=len(headers) - 1)
|
||||
temp_rule = dict(zip(headers, rule))
|
||||
if temp_rule:
|
||||
chain['rules'].append(temp_rule)
|
||||
|
||||
if chain:
|
||||
raw_output.append(chain)
|
||||
chain = {}
|
||||
headers = []
|
||||
|
||||
parsed_line = line.split()
|
||||
|
||||
chain['chain'] = parsed_line[1]
|
||||
chain['rules'] = []
|
||||
|
||||
continue
|
||||
|
||||
elif line.find('target') == 0 or line.find('pkts') == 1 or line.find('num') == 0:
|
||||
headers = []
|
||||
headers = [h for h in ' '.join(line.lower().strip().split()).split() if h]
|
||||
headers.append("options")
|
||||
|
||||
continue
|
||||
|
||||
else:
|
||||
rule = line.split(maxsplit=len(headers) - 1)
|
||||
temp_rule = dict(zip(headers, rule))
|
||||
if temp_rule:
|
||||
chain['rules'].append(temp_rule)
|
||||
|
||||
raw_output = list(filter(None, raw_output))
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -77,7 +77,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.2'
|
||||
description = 'jobs command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -144,12 +144,10 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
raw_output = []
|
||||
|
||||
linedata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, linedata))
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
if cleandata:
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for entry in cleandata:
|
||||
output_line = {}
|
||||
@@ -176,11 +174,11 @@ def parse(data, raw=False, quiet=False):
|
||||
parsed_line.insert(0, job_number)
|
||||
|
||||
# check for + or - in first field
|
||||
if parsed_line[0].find('+') != -1:
|
||||
if '+' in parsed_line[0]:
|
||||
job_history = 'current'
|
||||
parsed_line[0] = parsed_line[0].rstrip('+')
|
||||
|
||||
if parsed_line[0].find('-') != -1:
|
||||
if '-' in parsed_line[0]:
|
||||
job_history = 'previous'
|
||||
parsed_line[0] = parsed_line[0].rstrip('-')
|
||||
|
||||
|
||||
194
jc/parsers/last.py
Normal file
194
jc/parsers/last.py
Normal file
@@ -0,0 +1,194 @@
|
||||
"""jc - JSON CLI output utility last Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --last as the first argument if the piped input is coming from last or lastb
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ last | jc --last -p
|
||||
[
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys002",
|
||||
"hostname": null,
|
||||
"login": "Thu Feb 27 14:31",
|
||||
"logout": "still logged in"
|
||||
},
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys003",
|
||||
"hostname": null,
|
||||
"login": "Thu Feb 27 10:38",
|
||||
"logout": "10:38",
|
||||
"duration": "00:00"
|
||||
},
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys003",
|
||||
"hostname": null,
|
||||
"login": "Thu Feb 27 10:18",
|
||||
"logout": "10:18",
|
||||
"duration": "00:00"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ last | jc --last -p -r
|
||||
[
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys002",
|
||||
"hostname": "-",
|
||||
"login": "Thu Feb 27 14:31",
|
||||
"logout": "still_logged_in"
|
||||
},
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys003",
|
||||
"hostname": "-",
|
||||
"login": "Thu Feb 27 10:38",
|
||||
"logout": "10:38",
|
||||
"duration": "00:00"
|
||||
},
|
||||
{
|
||||
"user": "kbrazil",
|
||||
"tty": "ttys003",
|
||||
"hostname": "-",
|
||||
"login": "Thu Feb 27 10:18",
|
||||
"logout": "10:18",
|
||||
"duration": "00:00"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
"""
|
||||
import re
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.3'
|
||||
description = 'last and lastb command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
# details = 'enter any other details here'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'darwin', 'aix', 'freebsd']
|
||||
magic_commands = ['last', 'lastb']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Structured data with the following schema:
|
||||
|
||||
[
|
||||
{
|
||||
"user": string,
|
||||
"tty": string,
|
||||
"hostname": string,
|
||||
"login": string,
|
||||
"logout": string,
|
||||
"duration": string
|
||||
}
|
||||
]
|
||||
"""
|
||||
for entry in proc_data:
|
||||
if 'user' in entry and entry['user'] == 'boot_time':
|
||||
entry['user'] = 'boot time'
|
||||
|
||||
if 'tty' in entry and entry['tty'] == '~':
|
||||
entry['tty'] = None
|
||||
|
||||
if 'tty' in entry and entry['tty'] == 'system_boot':
|
||||
entry['tty'] = 'system boot'
|
||||
|
||||
if 'hostname' in entry and entry['hostname'] == '-':
|
||||
entry['hostname'] = None
|
||||
|
||||
if 'logout' in entry and entry['logout'] == 'still_logged_in':
|
||||
entry['logout'] = 'still logged in'
|
||||
|
||||
if 'logout' in entry and entry['logout'] == 'gone_-_no_logout':
|
||||
entry['logout'] = 'gone - no logout'
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for entry in cleandata:
|
||||
output_line = {}
|
||||
|
||||
if entry.startswith('wtmp begins ') or entry.startswith('btmp begins ') or entry.startswith('utx.log begins '):
|
||||
continue
|
||||
|
||||
entry = entry.replace('system boot', 'system_boot')
|
||||
entry = entry.replace('boot time', 'boot_time')
|
||||
entry = entry.replace(' still logged in', '- still_logged_in')
|
||||
entry = entry.replace(' gone - no logout', '- gone_-_no_logout')
|
||||
|
||||
linedata = entry.split()
|
||||
if re.match(r'[MTWFS][ouerha][nedritnu] [JFMASOND][aepuco][nbrynlgptvc]', ' '.join(linedata[2:4])):
|
||||
linedata.insert(2, '-')
|
||||
|
||||
# freebsd fix
|
||||
if linedata[0] == 'boot_time':
|
||||
linedata.insert(1, '-')
|
||||
linedata.insert(1, '~')
|
||||
|
||||
output_line['user'] = linedata[0]
|
||||
output_line['tty'] = linedata[1]
|
||||
output_line['hostname'] = linedata[2]
|
||||
output_line['login'] = ' '.join(linedata[3:7])
|
||||
|
||||
if len(linedata) > 8:
|
||||
output_line['logout'] = linedata[8]
|
||||
|
||||
if len(linedata) > 9:
|
||||
output_line['duration'] = linedata[9].replace('(', '').replace(')', '')
|
||||
|
||||
raw_output.append(output_line)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
@@ -1,19 +1,20 @@
|
||||
"""jc - JSON CLI output utility ls Parser
|
||||
|
||||
Note: The -l option of ls should be used to correctly parse filenames that include newline characters.
|
||||
Note: The -l or -b option of ls should be used to correctly parse filenames that include newline characters.
|
||||
Since ls does not encode newlines in filenames when outputting to a pipe it will cause jc to see
|
||||
multiple files instead of a single file if -l is not used.
|
||||
multiple files instead of a single file if -l or -b is not used.
|
||||
|
||||
Usage:
|
||||
|
||||
specify --ls as the first argument if the piped input is coming from ls
|
||||
|
||||
ls options supported:
|
||||
- laR
|
||||
|
||||
-lbaR
|
||||
--time-style=full-iso
|
||||
- h file sizes will be available in text form with -r but larger file sizes
|
||||
with human readable suffixes will be converted to Null in default view
|
||||
since the parser attempts to convert this field to an integer.
|
||||
-h file sizes will be available in text form with -r but larger file sizes
|
||||
with human readable suffixes will be converted to Null in default view
|
||||
since the parser attempts to convert this field to an integer.
|
||||
|
||||
Compatibility:
|
||||
|
||||
@@ -148,7 +149,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.2'
|
||||
version = '1.6'
|
||||
description = 'ls command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -225,27 +226,28 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
linedata = data.splitlines()
|
||||
|
||||
# Delete first line if it starts with 'total 1234'
|
||||
if linedata:
|
||||
if re.match('^total [0-9]+', linedata[0]):
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# Delete first line if it starts with 'total 1234'
|
||||
if re.match(r'total [0-9]+', linedata[0]):
|
||||
linedata.pop(0)
|
||||
|
||||
# Look for parent line if glob or -R is used
|
||||
if not re.match('^[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', linedata[0]) \
|
||||
and linedata[0].endswith(':'):
|
||||
parent = linedata.pop(0)[:-1]
|
||||
# Pop following total line
|
||||
linedata.pop(0)
|
||||
# Look for parent line if glob or -R is used
|
||||
if not re.match(r'[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', linedata[0]) \
|
||||
and linedata[0].endswith(':'):
|
||||
parent = linedata.pop(0)[:-1]
|
||||
# Pop following total line if it exists
|
||||
if re.match(r'total [0-9]+', linedata[0]):
|
||||
linedata.pop(0)
|
||||
|
||||
if linedata:
|
||||
# Check if -l was used to parse extra data
|
||||
if re.match('^[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', linedata[0]):
|
||||
if re.match(r'[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', linedata[0]):
|
||||
for entry in linedata:
|
||||
output_line = {}
|
||||
|
||||
parsed_line = entry.split(maxsplit=8)
|
||||
|
||||
if not re.match('^[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', entry) \
|
||||
if not re.match(r'[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', entry) \
|
||||
and entry.endswith(':'):
|
||||
parent = entry[:-1]
|
||||
new_section = True
|
||||
@@ -254,13 +256,18 @@ def parse(data, raw=False, quiet=False):
|
||||
raw_output[-1]['filename'] = raw_output[-1]['filename'][:-1]
|
||||
continue
|
||||
|
||||
if re.match('^total [0-9]+', entry):
|
||||
if re.match(r'total [0-9]+', entry):
|
||||
new_section = False
|
||||
continue
|
||||
|
||||
# fix for OSX - doesn't print 'total xx' line if empty directory
|
||||
if new_section and entry == '':
|
||||
new_section = False
|
||||
continue
|
||||
|
||||
# fixup for filenames with newlines
|
||||
if not new_section \
|
||||
and not re.match('^[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', entry):
|
||||
and not re.match(r'[-dclpsbDCMnP?]([-r][-w][-xsS]){2}([-r][-w][-xtT])[+]?', entry):
|
||||
raw_output[-1]['filename'] = raw_output[-1]['filename'] + '\n' + entry
|
||||
continue
|
||||
|
||||
@@ -301,7 +308,7 @@ def parse(data, raw=False, quiet=False):
|
||||
continue
|
||||
|
||||
if not quiet and next_is_parent and not entry.endswith(':') and not warned:
|
||||
jc.utils.warning_message('Newline characters detected. Filenames probably corrupted. Use ls -l instead.')
|
||||
jc.utils.warning_message('Newline characters detected. Filenames probably corrupted. Use ls -l or -b instead.')
|
||||
warned = True
|
||||
|
||||
output_line['filename'] = entry
|
||||
|
||||
@@ -216,7 +216,7 @@ import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.3'
|
||||
version = '1.5'
|
||||
description = 'lsblk command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -327,20 +327,23 @@ def parse(data, raw=False, quiet=False):
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
linedata = data.splitlines()
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, linedata))
|
||||
cleandata = data.splitlines()
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
raw_output = []
|
||||
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
cleandata[0] = cleandata[0].replace(':', '_')
|
||||
cleandata[0] = cleandata[0].replace('-', '_')
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
raw_output = jc.parsers.universal.sparse_table_parse(cleandata)
|
||||
cleandata = data.splitlines()
|
||||
|
||||
# clean up non-ascii characters, if any
|
||||
for entry in raw_output:
|
||||
entry['name'] = entry['name'].encode('ascii', errors='ignore').decode()
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
cleandata[0] = cleandata[0].replace(':', '_')
|
||||
cleandata[0] = cleandata[0].replace('-', '_')
|
||||
|
||||
raw_output = jc.parsers.universal.sparse_table_parse(cleandata)
|
||||
|
||||
# clean up non-ascii characters, if any
|
||||
for entry in raw_output:
|
||||
entry['name'] = entry['name'].encode('ascii', errors='ignore').decode()
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -107,7 +107,7 @@ import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
version = '1.3'
|
||||
description = 'lsmod command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -175,13 +175,17 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
cleandata = data.splitlines()
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
raw_output = []
|
||||
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for mod in raw_output:
|
||||
if 'by' in mod:
|
||||
mod['by'] = mod['by'].split(',')
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
|
||||
for mod in raw_output:
|
||||
if 'by' in mod:
|
||||
mod['by'] = mod['by'].split(',')
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -97,7 +97,7 @@ import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.2'
|
||||
description = 'lsof command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -169,58 +169,16 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
raw_output = []
|
||||
|
||||
linedata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, linedata))
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
if cleandata:
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
cleandata[0] = cleandata[0].replace('/', '_')
|
||||
|
||||
raw_output = jc.parsers.universal.sparse_table_parse(cleandata)
|
||||
|
||||
'''
|
||||
# find column value of last character of each header
|
||||
header_text = cleandata.pop(0).lower()
|
||||
|
||||
# clean up 'size/off' header
|
||||
# even though forward slash in a key is valid json, it can make things difficult
|
||||
header_row = header_text.replace('/', '_')
|
||||
|
||||
headers = header_row.split()
|
||||
|
||||
header_spec = []
|
||||
for i, h in enumerate(headers):
|
||||
# header tuple is (index, header_name, col)
|
||||
header_spec.append((i, h, header_row.find(h) + len(h)))
|
||||
|
||||
# parse lines
|
||||
for entry in cleandata:
|
||||
output_line = {}
|
||||
|
||||
# normalize data by inserting Null for missing data
|
||||
temp_line = entry.split(maxsplit=len(headers) - 1)
|
||||
|
||||
for spec in header_spec:
|
||||
|
||||
index = spec[0]
|
||||
header_name = spec[1]
|
||||
col = spec[2] - 1 # subtract one since column starts at 0 instead of 1
|
||||
|
||||
if header_name == 'command' or header_name == 'name':
|
||||
continue
|
||||
if entry[col] in string.whitespace:
|
||||
temp_line.insert(index, None)
|
||||
|
||||
name = ' '.join(temp_line[9:])
|
||||
fixed_line = temp_line[0:9]
|
||||
fixed_line.append(name)
|
||||
|
||||
output_line = dict(zip(headers, fixed_line))
|
||||
raw_output.append(output_line)
|
||||
'''
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
|
||||
@@ -6,7 +6,7 @@ Usage:
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin'
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Example:
|
||||
|
||||
@@ -56,13 +56,13 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
version = '1.5'
|
||||
description = 'mount command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'darwin']
|
||||
compatible = ['linux', 'darwin', 'freebsd']
|
||||
magic_commands = ['mount']
|
||||
|
||||
|
||||
@@ -158,14 +158,14 @@ def parse(data, raw=False, quiet=False):
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
linedata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, linedata))
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
raw_output = []
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
if cleandata:
|
||||
# check for OSX output
|
||||
if cleandata[0].find(' type ') == -1:
|
||||
if ' type ' not in cleandata[0]:
|
||||
raw_output = osx_parse(cleandata)
|
||||
|
||||
else:
|
||||
|
||||
@@ -4,13 +4,18 @@ Usage:
|
||||
|
||||
Specify --netstat as the first argument if the piped input is coming from netstat
|
||||
|
||||
Caveats:
|
||||
|
||||
- Use of multiple 'l' options is not supported on OSX (e.g. 'netstat -rlll')
|
||||
- Use of the 'A' option is not supported on OSX when using the 'r' option (e.g. netstat -rA)
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ sudo netstat -apee | jc --netstat -p
|
||||
# netstat -apee | jc --netstat -p
|
||||
[
|
||||
{
|
||||
"proto": "tcp",
|
||||
@@ -160,166 +165,95 @@ Examples:
|
||||
...
|
||||
]
|
||||
|
||||
$ sudo netstat -apee | jc --netstat -p -r
|
||||
$ netstat -r | jc --netstat -p
|
||||
[
|
||||
{
|
||||
"proto": "tcp",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "localhost",
|
||||
"foreign_address": "0.0.0.0",
|
||||
"state": "LISTEN",
|
||||
"user": "systemd-resolve",
|
||||
"inode": "26958",
|
||||
"program_name": "systemd-resolve",
|
||||
"kind": "network",
|
||||
"pid": "887",
|
||||
"local_port": "domain",
|
||||
"foreign_port": "*",
|
||||
"transport_protocol": "tcp",
|
||||
"network_protocol": "ipv4"
|
||||
"destination": "default",
|
||||
"gateway": "gateway",
|
||||
"genmask": "0.0.0.0",
|
||||
"route_flags": "UG",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0,
|
||||
"iface": "ens33",
|
||||
"kind": "route",
|
||||
"route_flags_pretty": [
|
||||
"UP",
|
||||
"GATEWAY"
|
||||
]
|
||||
},
|
||||
{
|
||||
"proto": "tcp",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "0.0.0.0",
|
||||
"foreign_address": "0.0.0.0",
|
||||
"state": "LISTEN",
|
||||
"user": "root",
|
||||
"inode": "30499",
|
||||
"program_name": "sshd",
|
||||
"kind": "network",
|
||||
"pid": "1186",
|
||||
"local_port": "ssh",
|
||||
"foreign_port": "*",
|
||||
"transport_protocol": "tcp",
|
||||
"network_protocol": "ipv4"
|
||||
"destination": "172.17.0.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.0.0",
|
||||
"route_flags": "U",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0,
|
||||
"iface": "docker0",
|
||||
"kind": "route",
|
||||
"route_flags_pretty": [
|
||||
"UP"
|
||||
]
|
||||
},
|
||||
{
|
||||
"proto": "tcp",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "localhost",
|
||||
"foreign_address": "localhost",
|
||||
"state": "ESTABLISHED",
|
||||
"user": "root",
|
||||
"inode": "46829",
|
||||
"program_name": "sshd: root",
|
||||
"kind": "network",
|
||||
"pid": "2242",
|
||||
"local_port": "ssh",
|
||||
"foreign_port": "52186",
|
||||
"transport_protocol": "tcp",
|
||||
"network_protocol": "ipv4"
|
||||
"destination": "192.168.71.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.255.0",
|
||||
"route_flags": "U",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0,
|
||||
"iface": "ens33",
|
||||
"kind": "route",
|
||||
"route_flags_pretty": [
|
||||
"UP"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
$ netstat -i | jc --netstat -p
|
||||
[
|
||||
{
|
||||
"iface": "ens33",
|
||||
"mtu": 1500,
|
||||
"rx_ok": 476,
|
||||
"rx_err": 0,
|
||||
"rx_drp": 0,
|
||||
"rx_ovr": 0,
|
||||
"tx_ok": 312,
|
||||
"tx_err": 0,
|
||||
"tx_drp": 0,
|
||||
"tx_ovr": 0,
|
||||
"flg": "BMRU",
|
||||
"kind": "interface"
|
||||
},
|
||||
{
|
||||
"proto": "tcp",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "localhost",
|
||||
"foreign_address": "localhost",
|
||||
"state": "ESTABLISHED",
|
||||
"user": "root",
|
||||
"inode": "46828",
|
||||
"program_name": "ssh",
|
||||
"kind": "network",
|
||||
"pid": "2241",
|
||||
"local_port": "52186",
|
||||
"foreign_port": "ssh",
|
||||
"transport_protocol": "tcp",
|
||||
"network_protocol": "ipv4"
|
||||
},
|
||||
{
|
||||
"proto": "tcp6",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "[::]",
|
||||
"foreign_address": "[::]",
|
||||
"state": "LISTEN",
|
||||
"user": "root",
|
||||
"inode": "30510",
|
||||
"program_name": "sshd",
|
||||
"kind": "network",
|
||||
"pid": "1186",
|
||||
"local_port": "ssh",
|
||||
"foreign_port": "*",
|
||||
"transport_protocol": "tcp",
|
||||
"network_protocol": "ipv6"
|
||||
},
|
||||
{
|
||||
"proto": "udp",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "localhost",
|
||||
"foreign_address": "0.0.0.0",
|
||||
"state": null,
|
||||
"user": "systemd-resolve",
|
||||
"inode": "26957",
|
||||
"program_name": "systemd-resolve",
|
||||
"kind": "network",
|
||||
"pid": "887",
|
||||
"local_port": "domain",
|
||||
"foreign_port": "*",
|
||||
"transport_protocol": "udp",
|
||||
"network_protocol": "ipv4"
|
||||
},
|
||||
{
|
||||
"proto": "raw6",
|
||||
"recv_q": "0",
|
||||
"send_q": "0",
|
||||
"local_address": "[::]",
|
||||
"foreign_address": "[::]",
|
||||
"state": "7",
|
||||
"user": "systemd-network",
|
||||
"inode": "27001",
|
||||
"program_name": "systemd-network",
|
||||
"kind": "network",
|
||||
"pid": "867",
|
||||
"local_port": "ipv6-icmp",
|
||||
"foreign_port": "*",
|
||||
"transport_protocol": null,
|
||||
"network_protocol": "ipv6"
|
||||
},
|
||||
{
|
||||
"proto": "unix",
|
||||
"refcnt": "2",
|
||||
"flags": null,
|
||||
"type": "DGRAM",
|
||||
"state": null,
|
||||
"inode": "33322",
|
||||
"program_name": "systemd",
|
||||
"path": "/run/user/1000/systemd/notify",
|
||||
"kind": "socket",
|
||||
"pid": " 1607"
|
||||
},
|
||||
{
|
||||
"proto": "unix",
|
||||
"refcnt": "2",
|
||||
"flags": "ACC",
|
||||
"type": "SEQPACKET",
|
||||
"state": "LISTENING",
|
||||
"inode": "20835",
|
||||
"program_name": "init",
|
||||
"path": "/run/udev/control",
|
||||
"kind": "socket",
|
||||
"pid": " 1"
|
||||
},
|
||||
...
|
||||
"iface": "lo",
|
||||
"mtu": 65536,
|
||||
"rx_ok": 0,
|
||||
"rx_err": 0,
|
||||
"rx_drp": 0,
|
||||
"rx_ovr": 0,
|
||||
"tx_ok": 0,
|
||||
"tx_err": 0,
|
||||
"tx_drp": 0,
|
||||
"tx_ovr": 0,
|
||||
"flg": "LRU",
|
||||
"kind": "interface"
|
||||
}
|
||||
]
|
||||
"""
|
||||
import string
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.2'
|
||||
version = '1.8'
|
||||
description = 'netstat command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux']
|
||||
compatible = ['linux', 'darwin', 'freebsd']
|
||||
magic_commands = ['netstat']
|
||||
|
||||
|
||||
@@ -340,34 +274,112 @@ def process(proc_data):
|
||||
|
||||
[
|
||||
{
|
||||
"proto": string,
|
||||
"recv_q": integer,
|
||||
"send_q": integer,
|
||||
"transport_protocol" string,
|
||||
"network_protocol": string,
|
||||
"local_address": string,
|
||||
"local_port": string,
|
||||
"local_port_num": integer,
|
||||
"foreign_address": string,
|
||||
"foreign_port": string,
|
||||
"foreign_port_num": integer,
|
||||
"state": string,
|
||||
"program_name": string,
|
||||
"pid": integer,
|
||||
"user": string,
|
||||
"security_context": string,
|
||||
"refcnt": integer,
|
||||
"flags": string,
|
||||
"type": string,
|
||||
"inode": integer,
|
||||
"path": string,
|
||||
"kind": string
|
||||
"proto": string,
|
||||
"recv_q": integer,
|
||||
"send_q": integer,
|
||||
"transport_protocol" string,
|
||||
"network_protocol": string,
|
||||
"local_address": string,
|
||||
"local_port": string,
|
||||
"local_port_num": integer,
|
||||
"foreign_address": string,
|
||||
"foreign_port": string,
|
||||
"foreign_port_num": integer,
|
||||
"state": string,
|
||||
"program_name": string,
|
||||
"pid": integer,
|
||||
"user": string,
|
||||
"security_context": string,
|
||||
"refcnt": integer,
|
||||
"flags": string,
|
||||
"type": string,
|
||||
"inode": integer,
|
||||
"path": string,
|
||||
"kind": string,
|
||||
"address": string,
|
||||
"unix_inode": string,
|
||||
"conn": string,
|
||||
"refs": string,
|
||||
"nextref": string,
|
||||
"name": string,
|
||||
"unit": integer,
|
||||
"vendor": integer,
|
||||
"class": integer,
|
||||
"subcla": integer,
|
||||
"unix_flags": integer,
|
||||
"pcbcount": integer,
|
||||
"rcvbuf": integer,
|
||||
"sndbuf": integer,
|
||||
"rxbytes": integer,
|
||||
"txbytes": integer,
|
||||
"destination": string,
|
||||
"gateway": string,
|
||||
"route_flags": string,
|
||||
"route_flags_pretty": [
|
||||
string,
|
||||
]
|
||||
"route_refs": integer,
|
||||
"use": integer,
|
||||
"mtu": integer,
|
||||
"expire": string,
|
||||
"genmask": string,
|
||||
"mss": integer,
|
||||
"window": integer,
|
||||
"irtt": integer,
|
||||
"iface": string,
|
||||
"metric": integer,
|
||||
"network": string,
|
||||
"address": string,
|
||||
"ipkts": integer, - = null
|
||||
"ierrs": integer, - = null
|
||||
"idrop": integer, - = null
|
||||
"opkts": integer, - = null
|
||||
"oerrs": integer, - = null
|
||||
"coll": integer, - = null
|
||||
"rx_ok": integer,
|
||||
"rx_err": integer,
|
||||
"rx_drp": integer,
|
||||
"rx_ovr": integer,
|
||||
"tx_ok": integer,
|
||||
"tx_err": integer,
|
||||
"tx_drp": integer,
|
||||
"tx_ovr": integer,
|
||||
"flg": string,
|
||||
"ibytes": integer,
|
||||
"obytes": integer,
|
||||
"r_mbuf": integer,
|
||||
"s_mbuf": integer,
|
||||
"r_clus": integer,
|
||||
"s_clus": integer,
|
||||
"r_hiwa": integer,
|
||||
"s_hiwa": integer,
|
||||
"r_lowa": integer,
|
||||
"s_lowa": integer,
|
||||
"r_bcnt": integer,
|
||||
"s_bcnt": integer,
|
||||
"r_bmax": integer,
|
||||
"s_bmax": integer,
|
||||
"rexmit": integer,
|
||||
"ooorcv": integer,
|
||||
"0_win": integer,
|
||||
"rexmt": float,
|
||||
"persist": float,
|
||||
"keep": float,
|
||||
"2msl": float,
|
||||
"delack": float,
|
||||
"rcvtime": float,
|
||||
}
|
||||
]
|
||||
"""
|
||||
for entry in proc_data:
|
||||
# integer changes
|
||||
int_list = ['recv_q', 'send_q', 'pid', 'refcnt', 'inode']
|
||||
int_list = ['recv_q', 'send_q', 'pid', 'refcnt', 'inode', 'unit', 'vendor', 'class',
|
||||
'osx_flags', 'subcla', 'pcbcount', 'rcvbuf', 'sndbuf', 'rxbytes', 'txbytes',
|
||||
'route_refs', 'use', 'mtu', 'mss', 'window', 'irtt', 'metric', 'ipkts',
|
||||
'ierrs', 'opkts', 'oerrs', 'coll', 'rx_ok', 'rx_err', 'rx_drp', 'rx_ovr',
|
||||
'tx_ok', 'tx_err', 'tx_drp', 'tx_ovr', 'idrop', 'ibytes', 'obytes', 'r_mbuf',
|
||||
's_mbuf', 'r_clus', 's_clus', 'r_hiwa', 's_hiwa', 'r_lowa', 's_lowa', 'r_bcnt',
|
||||
's_bcnt', 'r_bmax', 's_bmax', 'rexmit', 'ooorcv', '0_win']
|
||||
for key in int_list:
|
||||
if key in entry:
|
||||
try:
|
||||
@@ -376,6 +388,16 @@ def process(proc_data):
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
# float changes
|
||||
float_list = ['rexmt', 'persist', 'keep', '2msl', 'delack', 'rcvtime']
|
||||
for key in float_list:
|
||||
if key in entry:
|
||||
try:
|
||||
key_float = float(entry[key])
|
||||
entry[key] = key_float
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
if 'local_port' in entry:
|
||||
try:
|
||||
entry['local_port_num'] = int(entry['local_port'])
|
||||
@@ -391,128 +413,6 @@ def process(proc_data):
|
||||
return proc_data
|
||||
|
||||
|
||||
def normalize_headers(header):
|
||||
header = header.lower()
|
||||
header = header.replace('local address', 'local_address')
|
||||
header = header.replace('foreign address', 'foreign_address')
|
||||
header = header.replace('pid/program name', 'program_name')
|
||||
header = header.replace('security context', 'security_context')
|
||||
header = header.replace('i-node', 'inode')
|
||||
header = header.replace('-', '_')
|
||||
|
||||
return header
|
||||
|
||||
|
||||
def parse_network(headers, entry):
|
||||
# Count words in header
|
||||
# if len of line is one less than len of header, then insert None in field 5
|
||||
entry = entry.split(maxsplit=len(headers) - 1)
|
||||
|
||||
if len(entry) == len(headers) - 1:
|
||||
entry.insert(5, None)
|
||||
|
||||
output_line = dict(zip(headers, entry))
|
||||
output_line['kind'] = 'network'
|
||||
|
||||
return output_line
|
||||
|
||||
|
||||
def parse_socket(header_text, headers, entry):
|
||||
output_line = {}
|
||||
# get the column # of first letter of "state"
|
||||
state_col = header_text.find('state')
|
||||
# get the program name column area
|
||||
pn_start = header_text.find('program_name')
|
||||
pn_end = header_text.find('path') - 1
|
||||
|
||||
# remove [ and ] from each line
|
||||
entry = entry.replace('[ ]', '---')
|
||||
entry = entry.replace('[', ' ').replace(']', ' ')
|
||||
|
||||
# find program_name column area and substitute spaces with \u2063 there
|
||||
old_pn = entry[pn_start:pn_end]
|
||||
new_pn = old_pn.replace(' ', '\u2063')
|
||||
entry = entry.replace(old_pn, new_pn)
|
||||
|
||||
entry_list = entry.split(maxsplit=len(headers) - 1)
|
||||
# check column # to see if state column is populated
|
||||
if entry[state_col] in string.whitespace:
|
||||
entry_list.insert(4, None)
|
||||
|
||||
output_line = dict(zip(headers, entry_list))
|
||||
output_line['kind'] = 'socket'
|
||||
|
||||
# fix program_name field to turn \u2063 back to spaces
|
||||
if 'program_name' in output_line:
|
||||
if output_line['program_name']:
|
||||
old_d_pn = output_line['program_name']
|
||||
new_d_pn = old_d_pn.replace('\u2063', ' ')
|
||||
output_line['program_name'] = new_d_pn
|
||||
|
||||
return output_line
|
||||
|
||||
|
||||
def parse_post(raw_data):
    """
    Clean up and enrich parsed netstat entries.

    - strips trailing whitespace from every string value
    - converts the '---' flags placeholder and '-' program name to None
    - splits the 'pid/program name' column into 'pid' and 'program_name'
    - splits addresses into address and port fields
    - derives 'transport_protocol' and 'network_protocol' from 'proto'

    Parameters:

        raw_data:    (list) of dictionaries from parse_network()/parse_socket()

    Returns:

        List of dictionaries with the cleaned and derived fields added.
    """
    for entry in raw_data:
        # strip trailing whitespace; non-string values (None) are left as-is
        for key, value in entry.items():
            if isinstance(value, str):
                entry[key] = value.rstrip()

        # '---' is the placeholder substituted for '[ ]' by parse_socket()
        if entry.get('flags') == '---':
            entry['flags'] = None

        if 'program_name' in entry:
            entry['program_name'] = entry['program_name'].strip()
            if entry['program_name'] == '-':
                entry['program_name'] = None

            if entry['program_name']:
                # netstat reports this column as 'pid/program name'
                pn = entry['program_name'].split('/', maxsplit=1)
                entry['pid'] = pn[0]
                entry['program_name'] = pn[1]

        # split '<address>:<port>' into separate fields (split once, not twice)
        if entry.get('local_address'):
            laddr = entry['local_address'].rsplit(':', maxsplit=1)
            entry['local_address'] = laddr[0]
            entry['local_port'] = laddr[1]

        if entry.get('foreign_address'):
            faddr = entry['foreign_address'].rsplit(':', maxsplit=1)
            entry['foreign_address'] = faddr[0]
            entry['foreign_port'] = faddr[1]

        if 'proto' in entry and entry.get('kind') == 'network':
            # containment test matches the sibling linux parser's style
            if 'tcp' in entry['proto']:
                entry['transport_protocol'] = 'tcp'
            elif 'udp' in entry['proto']:
                entry['transport_protocol'] = 'udp'
            else:
                entry['transport_protocol'] = None

            entry['network_protocol'] = 'ipv6' if '6' in entry['proto'] else 'ipv4'

    return raw_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
@@ -527,51 +427,34 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
List of dictionaries. Raw or processed structured data.
|
||||
"""
|
||||
import jc.utils
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
cleandata = data.splitlines()
|
||||
cleandata = list(filter(None, cleandata))
|
||||
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
raw_output = []
|
||||
network = False
|
||||
socket = False
|
||||
headers = ''
|
||||
network_list = []
|
||||
socket_list = []
|
||||
|
||||
for line in cleandata:
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
if line.find('Active Internet') == 0:
|
||||
network_list = []
|
||||
network = True
|
||||
socket = False
|
||||
continue
|
||||
# check for FreeBSD/OSX vs Linux
|
||||
# is this from FreeBSD/OSX?
|
||||
if cleandata[0] == 'Active Internet connections' \
|
||||
or cleandata[0] == 'Active Internet connections (including servers)' \
|
||||
or cleandata[0] == 'Active Multipath Internet connections' \
|
||||
or cleandata[0] == 'Active LOCAL (UNIX) domain sockets' \
|
||||
or cleandata[0] == 'Registered kernel control modules' \
|
||||
or cleandata[0] == 'Active kernel event sockets' \
|
||||
or cleandata[0] == 'Active kernel control sockets' \
|
||||
or cleandata[0] == 'Routing tables' \
|
||||
or cleandata[0].startswith('Name '):
|
||||
|
||||
if line.find('Active UNIX') == 0:
|
||||
socket_list = []
|
||||
network = False
|
||||
socket = True
|
||||
continue
|
||||
import jc.parsers.netstat_freebsd_osx
|
||||
raw_output = jc.parsers.netstat_freebsd_osx.parse(cleandata)
|
||||
|
||||
if line.find('Proto') == 0:
|
||||
header_text = normalize_headers(line)
|
||||
headers = header_text.split()
|
||||
continue
|
||||
|
||||
if network:
|
||||
network_list.append(parse_network(headers, line))
|
||||
continue
|
||||
|
||||
if socket:
|
||||
socket_list.append(parse_socket(header_text, headers, line))
|
||||
continue
|
||||
|
||||
for item in [network_list, socket_list]:
|
||||
for entry in item:
|
||||
raw_output.append(entry)
|
||||
|
||||
raw_output = parse_post(raw_output)
|
||||
# use linux parser
|
||||
else:
|
||||
import jc.parsers.netstat_linux
|
||||
raw_output = jc.parsers.netstat_linux.parse(cleandata)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
320
jc/parsers/netstat_freebsd_osx.py
Normal file
320
jc/parsers/netstat_freebsd_osx.py
Normal file
@@ -0,0 +1,320 @@
|
||||
"""jc - JSON CLI output utility FreeBSD and OSX netstat Parser"""
|
||||
|
||||
|
||||
def normalize_headers(header):
    """Lowercase a netstat header line and map column names to snake_case keys."""
    replacements = (
        ('local address', 'local_address'),
        ('foreign address', 'foreign_address'),
        ('(state)', 'state'),
        ('inode', 'unix_inode'),
        ('flags', 'unix_flags'),
        ('-', '_'),
    )
    normalized = header.lower()
    for old, new in replacements:
        normalized = normalized.replace(old, new)
    return normalized
|
||||
|
||||
|
||||
def normalize_route_headers(header):
    """Lowercase a routing-table header line and map column names to snake_case keys."""
    replacements = (
        ('flags', 'route_flags'),
        ('refs', 'route_refs'),
        ('netif', 'iface'),
        ('-', '_'),
    )
    normalized = header.lower()
    for old, new in replacements:
        normalized = normalized.replace(old, new)
    return normalized
|
||||
|
||||
|
||||
def normalize_interface_headers(header):
    """Lowercase an interface-table header line and map column names to snake_case keys."""
    normalized = header.lower()
    normalized = normalized.replace('name', 'iface')
    return normalized.replace('-', '_')
|
||||
|
||||
|
||||
def parse_item(headers, entry, kind):
    """
    Split one table line into a dict keyed by headers and tag it with kind.

    Applies column fixups for udp records (which lack a state field),
    FreeBSD -T output, and interface records with no address field.
    """
    fields = entry.split(maxsplit=len(headers) - 1)
    is_network = kind == 'network'

    # udp records have no state field entry
    if is_network and fields[0].startswith('udp'):
        fields.insert(5, None)

    if is_network and 'socket' in headers and 'udp' in str(fields):
        fields.insert(7, None)

    # fixup for -T output on FreeBSD: pad the three tcp-only columns
    if is_network and '0_win' in headers and fields[0].startswith('udp'):
        fields[1:1] = ['', '', '']

    # interface records with no address field entry
    if kind == 'interface' and len(fields) == 8:
        fields.insert(3, None)

    parsed = dict(zip(headers, fields))
    parsed['kind'] = kind
    return parsed
|
||||
|
||||
|
||||
def parse_post(raw_data):
    """
    Clean up and enrich parsed FreeBSD/OSX netstat entries.

    Splits BSD-style '<address>.<port>' fields into address/port pairs,
    derives 'transport_protocol' and 'network_protocol' from 'proto',
    expands routing flag letters into 'route_flags_pretty', and strips
    whitespace from all string values.

    Parameters:

        raw_data:    (list) of dictionaries produced by parse_item()

    Returns:

        List of dictionaries with the cleaned and derived fields added.
    """
    # route flag letters -> names; hoisted out of the loop since the
    # mapping never changes (was rebuilt for every routing entry)
    route_flag_map = {
        '1': 'PROTO1',
        '2': 'PROTO2',
        '3': 'PROTO3',
        'B': 'BLACKHOLE',
        'b': 'BROADCAST',
        'C': 'CLONING',
        'c': 'PRCLONING',
        'D': 'DYNAMIC',
        'G': 'GATEWAY',
        'H': 'HOST',
        'I': 'IFSCOPE',
        'i': 'IFREF',
        'L': 'LLINFO',
        'M': 'MODIFIED',
        'm': 'MULTICAST',
        'R': 'REJECT',
        'r': 'ROUTER',
        'S': 'STATIC',
        'U': 'UP',
        'W': 'WASCLONED',
        'X': 'XRESOLVE',
        'Y': 'PROXY',
    }

    for entry in raw_data:
        # fixup name field in 'Registered kernel control module' entries
        if entry.get('name'):
            entry['name'] = entry['name'].strip()

        # split BSD-style '<address>.<port>' into fields (split once, not twice)
        if entry.get('local_address'):
            laddr = entry['local_address'].rsplit('.', maxsplit=1)
            entry['local_address'] = laddr[0]
            entry['local_port'] = laddr[1]

        if entry.get('foreign_address'):
            faddr = entry['foreign_address'].rsplit('.', maxsplit=1)
            entry['foreign_address'] = faddr[0]
            entry['foreign_port'] = faddr[1]

        if 'proto' in entry and entry.get('kind') == 'network':
            if entry['proto'] == 'udp46':
                entry['transport_protocol'] = entry['proto'][:-2]
            elif entry['proto'].startswith('icm'):
                entry['transport_protocol'] = 'icmp'
            else:
                # e.g. 'tcp4' -> 'tcp', 'udp6' -> 'udp'
                entry['transport_protocol'] = entry['proto'][:-1]

            entry['network_protocol'] = 'ipv6' if '6' in entry['proto'] else 'ipv4'

        # expand route flag letters into a list of names
        if 'route_flags' in entry:
            entry['route_flags_pretty'] = [route_flag_map[flag]
                                           for flag in entry['route_flags']
                                           if flag in route_flag_map]

        # strip whitespace from beginning and end of all string values
        for key, value in entry.items():
            if isinstance(value, str):
                entry[key] = value.strip()

    return raw_data
|
||||
|
||||
|
||||
def parse(cleandata):
    """
    Main text parsing function for FreeBSD and OSX netstat

    Parameters:

        cleandata:   (list) of text lines to parse

    Returns:

        List of dictionaries. Raw structured data.
    """
    raw_output = []

    # banner line prefixes mapped to the section they introduce
    section_banners = (
        ('Active Internet', 'network'),
        ('Active Multipath Internet connections', 'multipath'),
        ('Active LOCAL (UNIX) domain sockets', 'socket'),
        ('Active UNIX domain sockets', 'socket'),
        ('Registered kernel control modules', 'reg_kernel_control'),
        ('Active kernel event sockets', 'active_kernel_event'),
        ('Active kernel control sockets', 'active_kernel_control'),
        ('Routing tables', 'routing_table'),
    )

    # header-line prefixes recognized inside each section
    header_prefixes = {
        'network': ('Socket ', 'Proto ', 'Tcpcb '),
        'socket': ('Address ',),
        'reg_kernel_control': ('id ', 'kctlref '),
        'active_kernel_event': ('Proto ', ' pcb '),
        'active_kernel_control': ('Proto ', ' pcb '),
    }

    # 'kind' tag attached to each parsed item per section
    item_kind = {
        'network': 'network',
        'socket': 'socket',
        'reg_kernel_control': 'Registered kernel control module',
        'active_kernel_event': 'Active kernel event socket',
        'active_kernel_control': 'Active kernel control socket',
    }

    section = None

    for line in cleandata:

        # section banner lines
        new_section = None
        for prefix, name in section_banners:
            if line.startswith(prefix):
                new_section = name
                break

        if new_section:
            section = new_section
            continue

        # the interface table banner doubles as its own header row,
        # so do not skip to the next line here
        if line.startswith('Name '):
            section = 'interface_table'

        # header rows
        if section in header_prefixes and line.startswith(header_prefixes[section]):
            headers = normalize_headers(line).split()
            continue

        if section == 'routing_table' and line.startswith('Destination '):
            headers = normalize_route_headers(line).split()
            continue

        if section == 'interface_table' and line.startswith('Name '):
            headers = normalize_interface_headers(line).split()
            continue

        # data rows
        if section in item_kind:
            raw_output.append(parse_item(headers, line, item_kind[section]))
            continue

        if section == 'multipath':
            # multipath parsing is not implemented
            continue

        if section == 'routing_table' and not line.startswith(('Internet:', 'Internet6:')):
            raw_output.append(parse_item(headers, line, 'route'))
            continue

        if section == 'interface_table':
            raw_output.append(parse_item(headers, line, 'interface'))
            continue

    return parse_post(raw_output)
|
||||
280
jc/parsers/netstat_linux.py
Normal file
280
jc/parsers/netstat_linux.py
Normal file
@@ -0,0 +1,280 @@
|
||||
"""jc - JSON CLI output utility Linux netstat Parser"""
|
||||
import string
|
||||
|
||||
|
||||
def normalize_headers(header):
    """Lowercase a netstat header line and map column names to snake_case keys."""
    replacements = (
        ('local address', 'local_address'),
        ('foreign address', 'foreign_address'),
        ('pid/program name', 'program_name'),
        ('security context', 'security_context'),
        ('i-node', 'inode'),
        ('-', '_'),
    )
    normalized = header.lower()
    for old, new in replacements:
        normalized = normalized.replace(old, new)
    return normalized
|
||||
|
||||
|
||||
def normalize_route_headers(header):
    """Lowercase a routing-table header line and map column names to snake_case keys."""
    replacements = (
        ('flags', 'route_flags'),
        ('ref', 'route_refs'),
        ('-', '_'),
    )
    normalized = header.lower()
    for old, new in replacements:
        normalized = normalized.replace(old, new)
    return normalized
|
||||
|
||||
|
||||
def normalize_interface_headers(header):
    """Lowercase an interface-table header line and convert dashes to underscores."""
    return header.lower().replace('-', '_')
|
||||
|
||||
|
||||
def parse_network(headers, entry):
    """
    Parse one 'Active Internet connections' line into a dict.

    A line with one fewer field than the header is missing the optional
    state column; None is inserted at field 5 to keep columns aligned.
    """
    fields = entry.split(maxsplit=len(headers) - 1)

    # missing state column -> pad with None at field 5
    if len(fields) == len(headers) - 1:
        fields.insert(5, None)

    parsed = dict(zip(headers, fields))
    parsed['kind'] = 'network'
    return parsed
|
||||
|
||||
|
||||
def parse_socket(header_text, headers, entry):
|
||||
# get the column # of first letter of "state"
|
||||
state_col = header_text.find('state')
|
||||
# get the program name column area
|
||||
pn_start = header_text.find('program_name')
|
||||
pn_end = header_text.find('path') - 1
|
||||
|
||||
# remove [ and ] from each line
|
||||
entry = entry.replace('[ ]', '---')
|
||||
entry = entry.replace('[', ' ').replace(']', ' ')
|
||||
|
||||
# find program_name column area and substitute spaces with \u2063 there
|
||||
old_pn = entry[pn_start:pn_end]
|
||||
new_pn = old_pn.replace(' ', '\u2063')
|
||||
entry = entry.replace(old_pn, new_pn)
|
||||
|
||||
entry_list = entry.split(maxsplit=len(headers) - 1)
|
||||
# check column # to see if state column is populated
|
||||
if entry[state_col] in string.whitespace:
|
||||
entry_list.insert(4, None)
|
||||
|
||||
output_line = dict(zip(headers, entry_list))
|
||||
output_line['kind'] = 'socket'
|
||||
|
||||
# fix program_name field to turn \u2063 back to spaces
|
||||
if 'program_name' in output_line:
|
||||
if output_line['program_name']:
|
||||
old_d_pn = output_line['program_name']
|
||||
new_d_pn = old_d_pn.replace('\u2063', ' ')
|
||||
output_line['program_name'] = new_d_pn
|
||||
|
||||
return output_line
|
||||
|
||||
|
||||
def parse_route(headers, entry):
    """Split one routing-table line into a dict keyed by headers; tag kind 'route'."""
    fields = entry.split(maxsplit=len(headers) - 1)
    parsed = dict(zip(headers, fields))
    parsed['kind'] = 'route'
    return parsed
|
||||
|
||||
|
||||
def parse_interface(headers, entry):
    """Split one interface-table line into a dict keyed by headers; tag kind 'interface'."""
    fields = entry.split(maxsplit=len(headers) - 1)
    parsed = dict(zip(headers, fields))
    parsed['kind'] = 'interface'
    return parsed
|
||||
|
||||
|
||||
def parse_post(raw_data):
    """
    Clean up and enrich parsed Linux netstat entries.

    - strips trailing whitespace from every string value
    - converts the '---' flags placeholder and '-' program name to None
    - splits the 'pid/program name' column into 'pid' and 'program_name'
    - splits addresses into address and port fields
    - derives 'transport_protocol' and 'network_protocol' from 'proto'
    - expands route flag letters into 'route_flags_pretty'

    Parameters:

        raw_data:    (list) of dictionaries from the line parsers

    Returns:

        List of dictionaries with the cleaned and derived fields added.
    """
    # Flag mapping from https://www.man7.org/linux/man-pages/man8/route.8.html
    # hoisted out of the loop since the mapping never changes
    # (was rebuilt for every routing entry)
    route_flag_map = {
        'U': 'UP',
        'H': 'HOST',
        'G': 'GATEWAY',
        'R': 'REINSTATE',
        'D': 'DYNAMIC',
        'M': 'MODIFIED',
        'A': 'ADDRCONF',
        'C': 'CACHE',
        '!': 'REJECT'
    }

    for entry in raw_data:
        # strip trailing whitespace; non-string values (None) are left as-is
        for key, value in entry.items():
            if isinstance(value, str):
                entry[key] = value.rstrip()

        # '---' is the placeholder substituted for '[ ]' by parse_socket()
        if entry.get('flags') == '---':
            entry['flags'] = None

        if 'program_name' in entry:
            entry['program_name'] = entry['program_name'].strip()
            if entry['program_name'] == '-':
                entry['program_name'] = None

            if entry['program_name']:
                # netstat reports this column as 'pid/program name'
                pn = entry['program_name'].split('/', maxsplit=1)
                entry['pid'] = pn[0]
                entry['program_name'] = pn[1]

        # split '<address>:<port>' into separate fields (split once, not twice)
        if entry.get('local_address'):
            laddr = entry['local_address'].rsplit(':', maxsplit=1)
            entry['local_address'] = laddr[0]
            entry['local_port'] = laddr[1]

        if entry.get('foreign_address'):
            faddr = entry['foreign_address'].rsplit(':', maxsplit=1)
            entry['foreign_address'] = faddr[0]
            entry['foreign_port'] = faddr[1]

        if 'proto' in entry and entry.get('kind') == 'network':
            if 'tcp' in entry['proto']:
                entry['transport_protocol'] = 'tcp'
            elif 'udp' in entry['proto']:
                entry['transport_protocol'] = 'udp'
            else:
                entry['transport_protocol'] = None

            entry['network_protocol'] = 'ipv6' if '6' in entry['proto'] else 'ipv4'

        # expand route flag letters into a list of names
        if 'route_flags' in entry:
            entry['route_flags_pretty'] = [route_flag_map[flag]
                                           for flag in entry['route_flags']
                                           if flag in route_flag_map]

    return raw_data
|
||||
|
||||
|
||||
def parse(cleandata):
    """
    Main text parsing function for Linux netstat

    Parameters:

        cleandata:   (string) text data to parse

    Returns:

        List of dictionaries. Raw structured data.
    """
    raw_output = []
    headers = None
    section = None  # which netstat table we are currently inside

    # banner line prefixes mapped to the section they introduce
    banners = (
        ('Active Internet', 'network'),
        ('Active UNIX', 'socket'),
        ('Active Bluetooth', 'bluetooth'),
        ('Kernel IP routing table', 'route'),
        ('Kernel Interface table', 'interface'),
    )

    for line in cleandata:

        matched = None
        for prefix, name in banners:
            if line.startswith(prefix):
                matched = name
                break

        if matched:
            section = matched
            continue

        # header rows (checked regardless of the current section)
        if line.startswith('Proto'):
            header_text = normalize_headers(line)
            headers = header_text.split()
            continue

        if line.startswith('Destination '):
            header_text = normalize_route_headers(line)
            headers = header_text.split()
            continue

        if line.startswith('Iface '):
            header_text = normalize_interface_headers(line)
            headers = header_text.split()
            continue

        # data rows
        if section == 'network':
            raw_output.append(parse_network(headers, line))
        elif section == 'socket':
            raw_output.append(parse_socket(header_text, headers, line))
        elif section == 'bluetooth':
            # Active Bluetooth parsing is not implemented
            pass
        elif section == 'route':
            raw_output.append(parse_route(headers, line))
        elif section == 'interface':
            raw_output.append(parse_interface(headers, line))

    return parse_post(raw_output)
|
||||
299
jc/parsers/ntpq.py
Normal file
299
jc/parsers/ntpq.py
Normal file
@@ -0,0 +1,299 @@
|
||||
"""jc - JSON CLI output utility ntpq Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --ntpq as the first argument if the piped input is coming from ntpq -p
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ ntpq -p | jc --ntpq -p
|
||||
[
|
||||
{
|
||||
"remote": "44.190.6.254",
|
||||
"refid": "127.67.113.92",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 1,
|
||||
"poll": 64,
|
||||
"reach": 1,
|
||||
"delay": 23.399,
|
||||
"offset": -2.805,
|
||||
"jitter": 2.131,
|
||||
"state": null
|
||||
},
|
||||
{
|
||||
"remote": "ntp.wdc1.us.lea",
|
||||
"refid": "130.133.1.10",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": null,
|
||||
"poll": 64,
|
||||
"reach": 1,
|
||||
"delay": 93.053,
|
||||
"offset": -0.807,
|
||||
"jitter": 2.839,
|
||||
"state": null
|
||||
},
|
||||
{
|
||||
"remote": "clock.team-cymr",
|
||||
"refid": "204.9.54.119",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": null,
|
||||
"poll": 64,
|
||||
"reach": 1,
|
||||
"delay": 70.337,
|
||||
"offset": -2.909,
|
||||
"jitter": 2.6,
|
||||
"state": null
|
||||
},
|
||||
{
|
||||
"remote": "mirror1.sjc02.s",
|
||||
"refid": "216.218.254.202",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 2,
|
||||
"poll": 64,
|
||||
"reach": 1,
|
||||
"delay": 29.325,
|
||||
"offset": 1.044,
|
||||
"jitter": 4.069,
|
||||
"state": null,
|
||||
}
|
||||
]
|
||||
|
||||
$ ntpq -pn| jc --ntpq -p
|
||||
[
|
||||
{
|
||||
"remote": "44.190.6.254",
|
||||
"refid": "127.67.113.92",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 66,
|
||||
"poll": 64,
|
||||
"reach": 377,
|
||||
"delay": 22.69,
|
||||
"offset": -0.392,
|
||||
"jitter": 2.085,
|
||||
"state": "+"
|
||||
},
|
||||
{
|
||||
"remote": "108.59.2.24",
|
||||
"refid": "130.133.1.10",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 63,
|
||||
"poll": 64,
|
||||
"reach": 377,
|
||||
"delay": 90.805,
|
||||
"offset": 2.84,
|
||||
"jitter": 1.908,
|
||||
"state": "-"
|
||||
},
|
||||
{
|
||||
"remote": "38.229.71.1",
|
||||
"refid": "204.9.54.119",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 64,
|
||||
"poll": 64,
|
||||
"reach": 377,
|
||||
"delay": 68.699,
|
||||
"offset": -0.61,
|
||||
"jitter": 2.576,
|
||||
"state": "+"
|
||||
},
|
||||
{
|
||||
"remote": "72.5.72.15",
|
||||
"refid": "216.218.254.202",
|
||||
"st": 2,
|
||||
"t": "u",
|
||||
"when": 63,
|
||||
"poll": 64,
|
||||
"reach": 377,
|
||||
"delay": 22.654,
|
||||
"offset": 0.231,
|
||||
"jitter": 1.964,
|
||||
"state": "*"
|
||||
}
|
||||
]
|
||||
|
||||
$ ntpq -pn| jc --ntpq -p -r
|
||||
[
|
||||
{
|
||||
"s": "+",
|
||||
"remote": "44.190.6.254",
|
||||
"refid": "127.67.113.92",
|
||||
"st": "2",
|
||||
"t": "u",
|
||||
"when": "66",
|
||||
"poll": "64",
|
||||
"reach": "377",
|
||||
"delay": "22.690",
|
||||
"offset": "-0.392",
|
||||
"jitter": "2.085"
|
||||
},
|
||||
{
|
||||
"s": "-",
|
||||
"remote": "108.59.2.24",
|
||||
"refid": "130.133.1.10",
|
||||
"st": "2",
|
||||
"t": "u",
|
||||
"when": "63",
|
||||
"poll": "64",
|
||||
"reach": "377",
|
||||
"delay": "90.805",
|
||||
"offset": "2.840",
|
||||
"jitter": "1.908"
|
||||
},
|
||||
{
|
||||
"s": "+",
|
||||
"remote": "38.229.71.1",
|
||||
"refid": "204.9.54.119",
|
||||
"st": "2",
|
||||
"t": "u",
|
||||
"when": "64",
|
||||
"poll": "64",
|
||||
"reach": "377",
|
||||
"delay": "68.699",
|
||||
"offset": "-0.610",
|
||||
"jitter": "2.576"
|
||||
},
|
||||
{
|
||||
"s": "*",
|
||||
"remote": "72.5.72.15",
|
||||
"refid": "216.218.254.202",
|
||||
"st": "2",
|
||||
"t": "u",
|
||||
"when": "63",
|
||||
"poll": "64",
|
||||
"reach": "377",
|
||||
"delay": "22.654",
|
||||
"offset": "0.231",
|
||||
"jitter": "1.964"
|
||||
}
|
||||
]
|
||||
"""
|
||||
import jc.utils
|
||||
import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.3'
|
||||
description = 'ntpq -p command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'freebsd']
|
||||
magic_commands = ['ntpq']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (dictionary) raw structured data to process

    Returns:

        List of dictionaries. Structured data with the following schema:

        [
          {
            "state":    string,    # space/~ converted to null
            "remote":   string,
            "refid":    string,
            "st":       integer,
            "t":        string,
            "when":     integer,   # - converted to null
            "poll":     integer,
            "reach":    integer,
            "delay":    float,
            "offset":   float,
            "jitter":   float
          },
        ]

    """
    int_fields = ('st', 'when', 'poll', 'reach')
    float_fields = ('delay', 'offset', 'jitter')

    for entry in proc_data:
        # '~' was substituted by parse() for a blank state character
        state = entry.pop('s')
        entry['state'] = None if state == '~' else state

        for field in int_fields:
            if field in entry:
                try:
                    entry[field] = int(entry[field])
                except ValueError:
                    entry[field] = None

        for field in float_fields:
            if field in entry:
                try:
                    entry[field] = float(entry[field])
                except ValueError:
                    entry[field] = None

    return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string) text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of dictionaries. Raw or processed structured data.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    lines = data.splitlines()
    raw_output = []

    if jc.utils.has_data(data):

        # prepend a synthetic 's' (state) column to the header row
        lines[0] = ('s ' + lines[0]).lower()

        # drop the '=====' header delimiter row
        del lines[1]

        for idx in range(1, len(lines)):
            row = lines[idx]
            if row[0] == ' ':
                # no state character: substitute the '~' placeholder
                row = '~ ' + row[1:]
            else:
                # realign columns to account for the added 's' column
                row = row[:1] + ' ' + row[1:]

            # keep ip/hostname fields containing ' (' as a single token
            lines[idx] = row.replace(' (', '_(')

        raw_output = jc.parsers.universal.simple_table_parse(lines)

    return raw_output if raw else process(raw_output)
|
||||
175
jc/parsers/passwd.py
Normal file
175
jc/parsers/passwd.py
Normal file
@@ -0,0 +1,175 @@
|
||||
"""jc - JSON CLI output utility /etc/passwd file Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --passwd as the first argument if the piped input is coming from /etc/passwd
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /etc/passwd | jc --passwd -p
|
||||
[
|
||||
{
|
||||
"username": "nobody",
|
||||
"password": "*",
|
||||
"uid": -2,
|
||||
"gid": -2,
|
||||
"comment": "Unprivileged User",
|
||||
"home": "/var/empty",
|
||||
"shell": "/usr/bin/false"
|
||||
},
|
||||
{
|
||||
"username": "root",
|
||||
"password": "*",
|
||||
"uid": 0,
|
||||
"gid": 0,
|
||||
"comment": "System Administrator",
|
||||
"home": "/var/root",
|
||||
"shell": "/bin/sh"
|
||||
},
|
||||
{
|
||||
"username": "daemon",
|
||||
"password": "*",
|
||||
"uid": 1,
|
||||
"gid": 1,
|
||||
"comment": "System Services",
|
||||
"home": "/var/root",
|
||||
"shell": "/usr/bin/false"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ cat /etc/passwd | jc --passwd -p -r
|
||||
[
|
||||
{
|
||||
"username": "nobody",
|
||||
"password": "*",
|
||||
"uid": "-2",
|
||||
"gid": "-2",
|
||||
"comment": "Unprivileged User",
|
||||
"home": "/var/empty",
|
||||
"shell": "/usr/bin/false"
|
||||
},
|
||||
{
|
||||
"username": "root",
|
||||
"password": "*",
|
||||
"uid": "0",
|
||||
"gid": "0",
|
||||
"comment": "System Administrator",
|
||||
"home": "/var/root",
|
||||
"shell": "/bin/sh"
|
||||
},
|
||||
{
|
||||
"username": "daemon",
|
||||
"password": "*",
|
||||
"uid": "1",
|
||||
"gid": "1",
|
||||
"comment": "System Services",
|
||||
"home": "/var/root",
|
||||
"shell": "/usr/bin/false"
|
||||
},
|
||||
...
|
||||
]
|
||||
"""
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
description = '/etc/passwd file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
# details = 'enter any other details here'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'darwin', 'aix', 'freebsd']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (dictionary) raw structured data to process

    Returns:

        List of dictionaries. Structured data with the following schema:

        [
          {
            "username":  string,
            "password":  string,
            "uid":       integer,
            "gid":       integer,
            "comment":   string,
            "home":      string,
            "shell":     string
          }
        ]
    """
    for record in proc_data:
        # uid and gid are numeric strings in the raw output; convert in place
        for field in ('uid', 'gid'):
            if field not in record:
                continue
            try:
                record[field] = int(record[field])
            except ValueError:
                # unparsable value -> null in the final JSON
                record[field] = None

    return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of dictionaries. Raw or processed structured data.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    raw_output = []

    # drop blank lines before iterating
    cleandata = [line for line in data.splitlines() if line]

    if jc.utils.has_data(data):
        for row in cleandata:
            # comment lines are not passwd entries
            if row.startswith('#'):
                continue

            # /etc/passwd fields are colon-delimited, fixed positions
            fields = row.split(':')
            raw_output.append(
                {
                    'username': fields[0],
                    'password': fields[1],
                    'uid': fields[2],
                    'gid': fields[3],
                    'comment': fields[4],
                    'home': fields[5],
                    'shell': fields[6],
                }
            )

    return raw_output if raw else process(raw_output)
|
||||
507
jc/parsers/ping.py
Normal file
507
jc/parsers/ping.py
Normal file
@@ -0,0 +1,507 @@
|
||||
"""jc - JSON CLI output utility ping Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --ping as the first argument if the piped input is coming from ping
|
||||
|
||||
Note: Use the ping -c (count) option, otherwise data will not be piped to jc.
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ ping -c 3 -p ff cnn.com | jc --ping -p
|
||||
{
|
||||
"destination_ip": "151.101.1.67",
|
||||
"data_bytes": 56,
|
||||
"pattern": "0xff",
|
||||
"destination": "cnn.com",
|
||||
"packets_transmitted": 3,
|
||||
"packets_received": 3,
|
||||
"packet_loss_percent": 0.0,
|
||||
"duplicates": 0,
|
||||
"round_trip_ms_min": 28.015,
|
||||
"round_trip_ms_avg": 32.848,
|
||||
"round_trip_ms_max": 39.376,
|
||||
"round_trip_ms_stddev": 4.79,
|
||||
"responses": [
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": 64,
|
||||
"response_ip": "151.101.1.67",
|
||||
"icmp_seq": 0,
|
||||
"ttl": 59,
|
||||
"time_ms": 28.015,
|
||||
"duplicate": false
|
||||
},
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": 64,
|
||||
"response_ip": "151.101.1.67",
|
||||
"icmp_seq": 1,
|
||||
"ttl": 59,
|
||||
"time_ms": 39.376,
|
||||
"duplicate": false
|
||||
},
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": 64,
|
||||
"response_ip": "151.101.1.67",
|
||||
"icmp_seq": 2,
|
||||
"ttl": 59,
|
||||
"time_ms": 31.153,
|
||||
"duplicate": false
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
$ ping -c 3 -p ff cnn.com | jc --ping -p -r
|
||||
{
|
||||
"destination_ip": "151.101.129.67",
|
||||
"data_bytes": "56",
|
||||
"pattern": "0xff",
|
||||
"destination": "cnn.com",
|
||||
"packets_transmitted": "3",
|
||||
"packets_received": "3",
|
||||
"packet_loss_percent": "0.0",
|
||||
"duplicates": "0",
|
||||
"round_trip_ms_min": "25.078",
|
||||
"round_trip_ms_avg": "29.543",
|
||||
"round_trip_ms_max": "32.553",
|
||||
"round_trip_ms_stddev": "3.221",
|
||||
"responses": [
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": "64",
|
||||
"response_ip": "151.101.129.67",
|
||||
"icmp_seq": "0",
|
||||
"ttl": "59",
|
||||
"time_ms": "25.078",
|
||||
"duplicate": false
|
||||
},
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": "64",
|
||||
"response_ip": "151.101.129.67",
|
||||
"icmp_seq": "1",
|
||||
"ttl": "59",
|
||||
"time_ms": "30.999",
|
||||
"duplicate": false
|
||||
},
|
||||
{
|
||||
"type": "reply",
|
||||
"bytes": "64",
|
||||
"response_ip": "151.101.129.67",
|
||||
"icmp_seq": "2",
|
||||
"ttl": "59",
|
||||
"time_ms": "32.553",
|
||||
"duplicate": false
|
||||
}
|
||||
]
|
||||
}
|
||||
"""
|
||||
import string
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
    """Parser metadata consumed by the jc framework for the ping parser."""
    version = '1.0'
    description = 'ping command parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'

    # compatible options: linux, darwin, cygwin, win32, aix, freebsd
    compatible = ['linux', 'darwin', 'freebsd']
    # commands this parser is selected for via jc's command "magic" syntax
    magic_commands = ['ping', 'ping6']


# expose the parser version at module level, per jc parser convention
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (dictionary) raw structured data to process

    Returns:

        Dictionary. Structured data with the following schema:

        {
          "source_ip":             string,
          "destination_ip":        string,
          "data_bytes":            integer,
          "pattern":               string,        (null if not set)
          "destination":           string,
          "packets_transmitted":   integer,
          "packets_received":      integer,
          "packet_loss_percent":   float,
          "duplicates":            integer,
          "round_trip_ms_min":     float,
          "round_trip_ms_avg":     float,
          "round_trip_ms_max":     float,
          "round_trip_ms_stddev":  float,
          "responses": [
            {
              "type":         string,        ('reply' or 'timeout')
              "timestamp":    float,
              "bytes":        integer,
              "response_ip":  string,
              "icmp_seq":     integer,
              "ttl":          integer,
              "time_ms":      float,
              "duplicate":    boolean
            }
          ]
        }
    """
    int_list = ['data_bytes', 'packets_transmitted', 'packets_received', 'bytes', 'icmp_seq', 'ttl', 'duplicates']
    float_list = ['packet_loss_percent', 'round_trip_ms_min', 'round_trip_ms_avg', 'round_trip_ms_max',
                  'round_trip_ms_stddev', 'timestamp', 'time_ms']

    def _convert(target, key, converter):
        # convert target[key] in place; unparsable or null values become None
        try:
            target[key] = converter(target[key])
        except (ValueError, TypeError):
            target[key] = None

    # idiom fix: membership tests replace the original linear scans over the
    # type lists (for item in int_list: if item == key: ...)
    for key in proc_data:
        if key in int_list:
            _convert(proc_data, key, int)
        if key in float_list:
            _convert(proc_data, key, float)

    for entry in proc_data.get('responses', []):
        for k in entry:
            if k in int_list:
                _convert(entry, k, int)
            if k in float_list:
                _convert(entry, k, float)

    return proc_data
|
||||
|
||||
|
||||
def linux_parse(data):
    """
    Parse Linux-flavored ping/ping6 output into a raw (all-string) dictionary.

    Parameters:

        data:        (string) complete text output of the ping command

    Returns:

        Dictionary of raw structured data. Numeric conversion is done later
        in process().
    """
    raw_output = {}
    ping_responses = []
    pattern = None
    footer = False

    linedata = data.splitlines()

    # check for PATTERN
    if linedata[0].startswith('PATTERN: '):
        pattern = linedata.pop(0).split(': ')[1]

    # discard any leading lines until the 'PING ' header line
    while not linedata[0].startswith('PING '):
        linedata.pop(0)

    # heuristic: the ipv4 header line contains 'bytes of data'
    # (presumably the ipv6 header does not — based on this check only)
    ipv4 = True if 'bytes of data' in linedata[0] else False

    # character at index 5 is the first character after the 'PING ' prefix;
    # a non-digit there means a hostname (not a bare IP) was pinged
    if ipv4 and linedata[0][5] not in string.digits:
        hostname = True
    elif ipv4 and linedata[0][5] in string.digits:
        hostname = False
    elif not ipv4 and ' (' in linedata[0]:
        hostname = True
    else:
        hostname = False

    for line in filter(None, linedata):
        # header line: destination IP, data bytes and optional pattern
        if line.startswith('PING '):
            # field positions shift right by one for ipv6-with-hostname output
            if ipv4 and not hostname:
                dst_ip, dta_byts = (2, 3)
            elif ipv4 and hostname:
                dst_ip, dta_byts = (2, 3)
            elif not ipv4 and not hostname:
                dst_ip, dta_byts = (2, 3)
            else:
                dst_ip, dta_byts = (3, 4)

            line = line.replace('(', ' ').replace(')', ' ')
            raw_output.update(
                {
                    'destination_ip': line.split()[dst_ip].lstrip('(').rstrip(')'),
                    'data_bytes': line.split()[dta_byts],
                    'pattern': pattern
                }
            )
            continue

        # '--- <dest> ping statistics ---' separator starts the footer section
        if line.startswith('---'):
            footer = True
            raw_output['destination'] = line.split()[1]
            continue

        if footer:
            # summary line; field positions differ when duplicates were seen
            if 'packets transmitted' in line:
                if ' duplicates,' in line:
                    raw_output.update(
                        {
                            'packets_transmitted': line.split()[0],
                            'packets_received': line.split()[3],
                            'packet_loss_percent': line.split()[7].rstrip('%'),
                            'duplicates': line.split()[5].lstrip('+'),
                            'time_ms': line.split()[11].replace('ms', '')
                        }
                    )
                    continue
                else:
                    raw_output.update(
                        {
                            'packets_transmitted': line.split()[0],
                            'packets_received': line.split()[3],
                            'packet_loss_percent': line.split()[5].rstrip('%'),
                            'duplicates': '0',
                            'time_ms': line.split()[9].replace('ms', '')
                        }
                    )
                    continue

            # otherwise: 'rtt min/avg/max/mdev = a/b/c/d ms' line
            else:
                split_line = line.split(' = ')[1]
                split_line = split_line.split('/')
                raw_output.update(
                    {
                        'round_trip_ms_min': split_line[0],
                        'round_trip_ms_avg': split_line[1],
                        'round_trip_ms_max': split_line[2],
                        'round_trip_ms_stddev': split_line[3].split()[0]
                    }
                )

        # ping response lines
        else:
            # request timeout
            if 'no answer yet for icmp_seq=' in line:
                timestamp = False
                isequence = 5

                # if timestamp option is specified, then shift icmp sequence field right by one
                if line[0] == '[':
                    timestamp = True
                    isequence = 6

                response = {
                    'type': 'timeout',
                    'timestamp': line.split()[0].lstrip('[').rstrip(']') if timestamp else None,
                    'icmp_seq': line.replace('=', ' ').split()[isequence]
                }
                ping_responses.append(response)
                continue

            # normal responses
            else:
                # strip delimiters so every value becomes its own whitespace field
                line = line.replace('(', ' ').replace(')', ' ').replace('=', ' ')

                # positions of items depend on whether ipv4/ipv6 and/or ip/hostname is used
                if ipv4 and not hostname:
                    bts, rip, iseq, t2l, tms = (0, 3, 5, 7, 9)
                elif ipv4 and hostname:
                    bts, rip, iseq, t2l, tms = (0, 4, 7, 9, 11)
                elif not ipv4 and not hostname:
                    bts, rip, iseq, t2l, tms = (0, 3, 5, 7, 9)
                elif not ipv4 and hostname:
                    bts, rip, iseq, t2l, tms = (0, 4, 7, 9, 11)

                # if timestamp option is specified, then shift everything right by one
                timestamp = False
                if line[0] == '[':
                    timestamp = True
                    bts, rip, iseq, t2l, tms = (bts + 1, rip + 1, iseq + 1, t2l + 1, tms + 1)

                response = {
                    'type': 'reply',
                    'timestamp': line.split()[0].lstrip('[').rstrip(']') if timestamp else None,
                    'bytes': line.split()[bts],
                    'response_ip': line.split()[rip].rstrip(':'),
                    'icmp_seq': line.split()[iseq],
                    'ttl': line.split()[t2l],
                    'time_ms': line.split()[tms],
                    # linux ping tags duplicate replies with 'DUP!'
                    'duplicate': True if 'DUP!' in line else False
                }

                ping_responses.append(response)
                continue

    raw_output['responses'] = ping_responses

    return raw_output
|
||||
|
||||
|
||||
def bsd_parse(data):
    """
    Parse BSD/macOS-flavored ping/ping6 output into a raw (all-string) dictionary.

    Parameters:

        data:        (string) complete text output of the ping command

    Returns:

        Dictionary of raw structured data. Numeric conversion is done later
        in process().
    """
    raw_output = {}
    ping_responses = []
    pattern = None
    footer = False

    linedata = data.splitlines()

    # check for PATTERN
    if linedata[0].startswith('PATTERN: '):
        pattern = linedata.pop(0).split(': ')[1]

    for line in filter(None, linedata):
        # ipv4 header line
        if line.startswith('PING '):
            raw_output.update(
                {
                    'destination_ip': line.split()[2].lstrip('(').rstrip(':').rstrip(')'),
                    'data_bytes': line.split()[3],
                    'pattern': pattern
                }
            )
            continue

        # ipv6 header line: 'PING6(... bytes) src --> dst' style
        if line.startswith('PING6('):
            line = line.replace('(', ' ').replace(')', ' ').replace('=', ' ')
            raw_output.update(
                {
                    'source_ip': line.split()[4],
                    'destination_ip': line.split()[6],
                    'data_bytes': line.split()[1],
                    'pattern': pattern
                }
            )
            continue

        # '--- <dest> ping statistics ---' separator starts the footer section
        if line.startswith('---'):
            footer = True
            raw_output['destination'] = line.split()[1]
            continue

        if footer:
            # summary line; field positions differ when duplicates were seen
            if 'packets transmitted' in line:
                if ' duplicates,' in line:
                    raw_output.update(
                        {
                            'packets_transmitted': line.split()[0],
                            'packets_received': line.split()[3],
                            'packet_loss_percent': line.split()[8].rstrip('%'),
                            'duplicates': line.split()[6].lstrip('+'),
                        }
                    )
                    continue
                else:
                    raw_output.update(
                        {
                            'packets_transmitted': line.split()[0],
                            'packets_received': line.split()[3],
                            'packet_loss_percent': line.split()[6].rstrip('%'),
                            'duplicates': '0',
                        }
                    )
                    continue

            # otherwise: 'round-trip min/avg/max/stddev = a/b/c/d ms' line
            else:
                split_line = line.split(' = ')[1]
                split_line = split_line.split('/')
                raw_output.update(
                    {
                        'round_trip_ms_min': split_line[0],
                        'round_trip_ms_avg': split_line[1],
                        'round_trip_ms_max': split_line[2],
                        'round_trip_ms_stddev': split_line[3].replace(' ms', '')
                    }
                )

        # ping response lines
        else:
            # ipv4 lines
            if ',' not in line:

                # request timeout
                if line.startswith('Request timeout for '):
                    response = {
                        'type': 'timeout',
                        'icmp_seq': line.split()[4]
                    }
                    ping_responses.append(response)
                    continue

                # normal response
                else:
                    # strip delimiters so each value is its own whitespace field
                    line = line.replace(':', ' ').replace('=', ' ')
                    response = {
                        'type': 'reply',
                        'bytes': line.split()[0],
                        'response_ip': line.split()[3],
                        'icmp_seq': line.split()[5],
                        'ttl': line.split()[7],
                        'time_ms': line.split()[9]
                    }
                    ping_responses.append(response)
                    continue

            # ipv6 lines
            else:
                line = line.replace(',', ' ').replace('=', ' ')
                response = {
                    'type': 'reply',
                    'bytes': line.split()[0],
                    'response_ip': line.split()[3],
                    'icmp_seq': line.split()[5],
                    'ttl': line.split()[7],
                    'time_ms': line.split()[9]
                }
                ping_responses.append(response)
                continue

    # identify duplicates in responses: BSD ping has no 'DUP!' tag, so a reply
    # is marked duplicate when its icmp_seq was already seen. Note: only the
    # second and later occurrences are flagged (count is taken after append,
    # so the first occurrence sees count == 1).
    if ping_responses:
        seq_list = []
        for reply in ping_responses:
            seq_list.append(reply['icmp_seq'])
            reply['duplicate'] = True if seq_list.count(reply['icmp_seq']) > 1 else False

    raw_output['responses'] = ping_responses

    return raw_output
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        Dictionary. Raw or processed structured data.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    raw_output = {}

    if jc.utils.has_data(data):
        # heuristic dispatch: the second-to-last output line contains 'time'
        # for linux-style ping output; otherwise assume bsd/macos style
        penultimate_line = data.splitlines()[-2]
        flavor_parser = linux_parse if 'time' in penultimate_line else bsd_parse
        raw_output = flavor_parser(data)

    return raw_output if raw else process(raw_output)
|
||||
@@ -32,7 +32,7 @@ import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.3'
|
||||
description = 'pip list command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -88,28 +88,28 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
raw_output = []
|
||||
|
||||
linedata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, linedata))
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
# detect legacy output type
|
||||
if cleandata[0].find(' (') != -1:
|
||||
for row in cleandata:
|
||||
raw_output.append({'package': row.split(' (')[0],
|
||||
'version': row.split(' (')[1].rstrip(')')})
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# otherwise normal table output
|
||||
else:
|
||||
# clear separator line
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if line.find('---') != -1:
|
||||
cleandata.pop(i)
|
||||
# detect legacy output type
|
||||
if ' (' in cleandata[0]:
|
||||
for row in cleandata:
|
||||
raw_output.append({'package': row.split(' (')[0],
|
||||
'version': row.split(' (')[1].rstrip(')')})
|
||||
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
# otherwise normal table output
|
||||
else:
|
||||
# clear separator line
|
||||
for i, line in reversed(list(enumerate(cleandata))):
|
||||
if '---' in line:
|
||||
cleandata.pop(i)
|
||||
|
||||
if cleandata:
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
|
||||
if cleandata:
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -42,7 +42,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.1'
|
||||
description = 'pip show command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -107,12 +107,11 @@ def parse(data, raw=False, quiet=False):
|
||||
raw_output = []
|
||||
package = {}
|
||||
|
||||
linedata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, linedata))
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
if cleandata:
|
||||
for row in cleandata:
|
||||
if row.startswith('---'):
|
||||
raw_output.append(package)
|
||||
|
||||
@@ -177,7 +177,7 @@ import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
version = '1.3'
|
||||
description = 'ps command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -282,9 +282,12 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
cleandata = data.splitlines()
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
raw_output = []
|
||||
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -14,53 +14,48 @@ Examples:
|
||||
[
|
||||
{
|
||||
"destination": "default",
|
||||
"gateway": "gateway",
|
||||
"gateway": "_gateway",
|
||||
"genmask": "0.0.0.0",
|
||||
"flags": "UG",
|
||||
"metric": 100,
|
||||
"metric": 202,
|
||||
"ref": 0,
|
||||
"use": 0,
|
||||
"iface": "ens33",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0
|
||||
},
|
||||
{
|
||||
"destination": "172.17.0.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.0.0",
|
||||
"flags": "U",
|
||||
"metric": 0,
|
||||
"ref": 0,
|
||||
"use": 0,
|
||||
"iface": "docker",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0
|
||||
"irtt": 0,
|
||||
"flags_pretty": [
|
||||
"UP",
|
||||
"GATEWAY"
|
||||
]
|
||||
},
|
||||
{
|
||||
"destination": "192.168.71.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.255.0",
|
||||
"flags": "U",
|
||||
"metric": 100,
|
||||
"metric": 202,
|
||||
"ref": 0,
|
||||
"use": 0,
|
||||
"iface": "ens33",
|
||||
"mss": 0,
|
||||
"window": 0,
|
||||
"irtt": 0
|
||||
"irtt": 0,
|
||||
"flags_pretty": [
|
||||
"UP"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
|
||||
$ route -ee | jc --route -p -r
|
||||
[
|
||||
{
|
||||
"destination": "default",
|
||||
"gateway": "gateway",
|
||||
"gateway": "_gateway",
|
||||
"genmask": "0.0.0.0",
|
||||
"flags": "UG",
|
||||
"metric": "100",
|
||||
"metric": "202",
|
||||
"ref": "0",
|
||||
"use": "0",
|
||||
"iface": "ens33",
|
||||
@@ -68,25 +63,12 @@ Examples:
|
||||
"window": "0",
|
||||
"irtt": "0"
|
||||
},
|
||||
{
|
||||
"destination": "172.17.0.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.0.0",
|
||||
"flags": "U",
|
||||
"metric": "0",
|
||||
"ref": "0",
|
||||
"use": "0",
|
||||
"iface": "docker",
|
||||
"mss": "0",
|
||||
"window": "0",
|
||||
"irtt": "0"
|
||||
},
|
||||
{
|
||||
"destination": "192.168.71.0",
|
||||
"gateway": "0.0.0.0",
|
||||
"genmask": "255.255.255.0",
|
||||
"flags": "U",
|
||||
"metric": "100",
|
||||
"metric": "202",
|
||||
"ref": "0",
|
||||
"use": "0",
|
||||
"iface": "ens33",
|
||||
@@ -95,13 +77,14 @@ Examples:
|
||||
"irtt": "0"
|
||||
}
|
||||
]
|
||||
|
||||
"""
|
||||
import jc.utils
|
||||
import jc.parsers.universal
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.4'
|
||||
description = 'route command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -128,17 +111,20 @@ def process(proc_data):
|
||||
|
||||
[
|
||||
{
|
||||
"destination": string,
|
||||
"gateway": string,
|
||||
"genmask": string,
|
||||
"flags": string,
|
||||
"metric": integer,
|
||||
"ref": integer,
|
||||
"use": integer,
|
||||
"mss": integer,
|
||||
"window": integer,
|
||||
"irtt": integer,
|
||||
"iface": string
|
||||
"destination": string,
|
||||
"gateway": string,
|
||||
"genmask": string,
|
||||
"flags": string,
|
||||
"flags_pretty": [
|
||||
string,
|
||||
]
|
||||
"metric": integer,
|
||||
"ref": integer,
|
||||
"use": integer,
|
||||
"mss": integer,
|
||||
"window": integer,
|
||||
"irtt": integer,
|
||||
"iface": string
|
||||
}
|
||||
]
|
||||
"""
|
||||
@@ -152,6 +138,29 @@ def process(proc_data):
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
# add flags_pretty
|
||||
# Flag mapping from https://www.man7.org/linux/man-pages/man8/route.8.html
|
||||
if 'flags' in entry:
|
||||
flag_map = {
|
||||
'U': 'UP',
|
||||
'H': 'HOST',
|
||||
'G': 'GATEWAY',
|
||||
'R': 'REINSTATE',
|
||||
'D': 'DYNAMIC',
|
||||
'M': 'MODIFIED',
|
||||
'A': 'ADDRCONF',
|
||||
'C': 'CACHE',
|
||||
'!': 'REJECT'
|
||||
}
|
||||
|
||||
pretty_flags = []
|
||||
|
||||
for flag in entry['flags']:
|
||||
if flag in flag_map:
|
||||
pretty_flags.append(flag_map[flag])
|
||||
|
||||
entry['flags_pretty'] = pretty_flags
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
@@ -173,9 +182,18 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
cleandata = data.splitlines()[1:]
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
raw_output = []
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# fixup header row for ipv6
|
||||
if ' Next Hop ' in cleandata[0]:
|
||||
cleandata[0] = cleandata[0].replace(' If', ' Iface')
|
||||
cleandata[0] = cleandata[0].replace(' Next Hop ', ' Next_Hop ').replace(' Flag ', ' Flags ').replace(' Met ', ' Metric ')
|
||||
|
||||
cleandata[0] = cleandata[0].lower()
|
||||
raw_output = jc.parsers.universal.simple_table_parse(cleandata)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
183
jc/parsers/shadow.py
Normal file
183
jc/parsers/shadow.py
Normal file
@@ -0,0 +1,183 @@
|
||||
"""jc - JSON CLI output utility /etc/shadow file Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --shadow as the first argument if the piped input is coming from /etc/shadow
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ sudo cat /etc/shadow | jc --shadow -p
|
||||
[
|
||||
{
|
||||
"username": "root",
|
||||
"password": "*",
|
||||
"last_changed": 18113,
|
||||
"minimum": 0,
|
||||
"maximum": 99999,
|
||||
"warn": 7,
|
||||
"inactive": null,
|
||||
"expire": null
|
||||
},
|
||||
{
|
||||
"username": "daemon",
|
||||
"password": "*",
|
||||
"last_changed": 18113,
|
||||
"minimum": 0,
|
||||
"maximum": 99999,
|
||||
"warn": 7,
|
||||
"inactive": null,
|
||||
"expire": null
|
||||
},
|
||||
{
|
||||
"username": "bin",
|
||||
"password": "*",
|
||||
"last_changed": 18113,
|
||||
"minimum": 0,
|
||||
"maximum": 99999,
|
||||
"warn": 7,
|
||||
"inactive": null,
|
||||
"expire": null
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ sudo cat /etc/shadow | jc --shadow -p -r
|
||||
[
|
||||
{
|
||||
"username": "root",
|
||||
"password": "*",
|
||||
"last_changed": "18113",
|
||||
"minimum": "0",
|
||||
"maximum": "99999",
|
||||
"warn": "7",
|
||||
"inactive": "",
|
||||
"expire": ""
|
||||
},
|
||||
{
|
||||
"username": "daemon",
|
||||
"password": "*",
|
||||
"last_changed": "18113",
|
||||
"minimum": "0",
|
||||
"maximum": "99999",
|
||||
"warn": "7",
|
||||
"inactive": "",
|
||||
"expire": ""
|
||||
},
|
||||
{
|
||||
"username": "bin",
|
||||
"password": "*",
|
||||
"last_changed": "18113",
|
||||
"minimum": "0",
|
||||
"maximum": "99999",
|
||||
"warn": "7",
|
||||
"inactive": "",
|
||||
"expire": ""
|
||||
},
|
||||
...
|
||||
]
|
||||
"""
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
    """Parser metadata consumed by the jc framework for /etc/shadow."""
    version = '1.1'
    description = '/etc/shadow file parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    # details = 'enter any other details here'

    # compatible options: linux, darwin, cygwin, win32, aix, freebsd
    compatible = ['linux', 'darwin', 'aix', 'freebsd']


# expose the parser version at module level, per jc parser convention
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (dictionary) raw structured data to process

    Returns:

        List of dictionaries. Structured data with the following schema:

        [
          {
            "username":      string,
            "password":      string,
            "last_changed":  integer,
            "minimum":       integer,
            "maximum":       integer,
            "warn":          integer,
            "inactive":      integer,
            "expire":        integer
          }
        ]
    """
    numeric_fields = ('last_changed', 'minimum', 'maximum', 'warn', 'inactive', 'expire')

    for record in proc_data:
        # numeric shadow fields arrive as strings (possibly empty); convert in place
        for field in numeric_fields:
            if field not in record:
                continue
            try:
                record[field] = int(record[field])
            except ValueError:
                # empty or unparsable value -> null in the final JSON
                record[field] = None

    return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of dictionaries. Raw or processed structured data.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    raw_output = []

    # drop blank lines before iterating
    cleandata = [line for line in data.splitlines() if line]

    if jc.utils.has_data(data):
        for row in cleandata:
            # comment lines are not shadow entries
            if row.startswith('#'):
                continue

            # /etc/shadow fields are colon-delimited, fixed positions
            fields = row.split(':')
            raw_output.append(
                {
                    'username': fields[0],
                    'password': fields[1],
                    'last_changed': fields[2],
                    'minimum': fields[3],
                    'maximum': fields[4],
                    'warn': fields[5],
                    'inactive': fields[6],
                    'expire': fields[7],
                }
            )

    return raw_output if raw else process(raw_output)
|
||||
@@ -251,7 +251,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.2'
|
||||
description = 'ss command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -308,17 +308,17 @@ def process(proc_data):
|
||||
except (ValueError):
|
||||
entry[key] = None
|
||||
|
||||
if 'local_port' in entry:
|
||||
if 'local_port' in entry:
|
||||
try:
|
||||
entry['local_port_num'] = int(entry['local_port'])
|
||||
except (ValueError):
|
||||
pass
|
||||
|
||||
if 'peer_port' in entry:
|
||||
try:
|
||||
entry['peer_port_num'] = int(entry['peer_port'])
|
||||
except (ValueError):
|
||||
pass
|
||||
if 'peer_port' in entry:
|
||||
try:
|
||||
entry['peer_port_num'] = int(entry['peer_port'])
|
||||
except (ValueError):
|
||||
pass
|
||||
|
||||
return proc_data
|
||||
|
||||
@@ -342,12 +342,12 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
contains_colon = ['nl', 'p_raw', 'raw', 'udp', 'tcp', 'v_str', 'icmp6']
|
||||
raw_output = []
|
||||
cleandata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
if cleandata:
|
||||
header_text = cleandata[0].lower()
|
||||
header_text = header_text.replace('netidstate', 'netid state')
|
||||
header_text = header_text.replace('local address:port', 'local_address local_port')
|
||||
|
||||
@@ -6,7 +6,7 @@ Usage:
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
@@ -100,17 +100,18 @@ Examples:
|
||||
..
|
||||
]
|
||||
"""
|
||||
import shlex
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.5'
|
||||
description = 'stat command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux']
|
||||
compatible = ['linux', 'darwin', 'freebsd']
|
||||
magic_commands = ['stat']
|
||||
|
||||
|
||||
@@ -149,12 +150,16 @@ def process(proc_data):
|
||||
"access_time": string, # - = null
|
||||
"modify_time": string, # - = null
|
||||
"change_time": string, # - = null
|
||||
"birth_time": string # - = null
|
||||
"birth_time": string, # - = null
|
||||
"unix_device": integer,
|
||||
"rdev": integer,
|
||||
"block_size": integer,
|
||||
"unix_flags": string
|
||||
}
|
||||
]
|
||||
"""
|
||||
for entry in proc_data:
|
||||
int_list = ['size', 'blocks', 'io_blocks', 'inode', 'links', 'uid', 'gid']
|
||||
int_list = ['size', 'blocks', 'io_blocks', 'inode', 'links', 'uid', 'gid', 'unix_device', 'rdev', 'block_size']
|
||||
for key in int_list:
|
||||
if key in entry:
|
||||
try:
|
||||
@@ -192,87 +197,114 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
cleandata = data.splitlines()
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, cleandata))
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
|
||||
if cleandata:
|
||||
# stats output contains 8 lines
|
||||
for line in cleandata:
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# line #1
|
||||
if line.find('File:') == 2:
|
||||
output_line = {}
|
||||
line_list = line.split(maxsplit=1)
|
||||
output_line['file'] = line_list[1]
|
||||
# linux output
|
||||
if cleandata[0].startswith(' File: '):
|
||||
# stats output contains 8 lines
|
||||
for line in cleandata:
|
||||
|
||||
# populate link_to field if -> found
|
||||
if output_line['file'].find(' -> ') != -1:
|
||||
filename = output_line['file'].split(' -> ')[0].strip('\u2018').rstrip('\u2019')
|
||||
link = output_line['file'].split(' -> ')[1].strip('\u2018').rstrip('\u2019')
|
||||
output_line['file'] = filename
|
||||
output_line['link_to'] = link
|
||||
else:
|
||||
filename = output_line['file'].split(' -> ')[0].strip('\u2018').rstrip('\u2019')
|
||||
output_line['file'] = filename
|
||||
# line #1
|
||||
if line.find('File:') == 2:
|
||||
output_line = {}
|
||||
line_list = line.split(maxsplit=1)
|
||||
output_line['file'] = line_list[1]
|
||||
|
||||
continue
|
||||
# populate link_to field if -> found
|
||||
if ' -> ' in output_line['file']:
|
||||
filename = output_line['file'].split(' -> ')[0].strip('\u2018').rstrip('\u2019')
|
||||
link = output_line['file'].split(' -> ')[1].strip('\u2018').rstrip('\u2019')
|
||||
output_line['file'] = filename
|
||||
output_line['link_to'] = link
|
||||
else:
|
||||
filename = output_line['file'].split(' -> ')[0].strip('\u2018').rstrip('\u2019')
|
||||
output_line['file'] = filename
|
||||
|
||||
# line #2
|
||||
if line.find('Size:') == 2:
|
||||
line_list = line.split(maxsplit=7)
|
||||
output_line['size'] = line_list[1]
|
||||
output_line['blocks'] = line_list[3]
|
||||
output_line['io_blocks'] = line_list[6]
|
||||
output_line['type'] = line_list[7]
|
||||
continue
|
||||
continue
|
||||
|
||||
# line #3
|
||||
if line.find('Device:') == 0:
|
||||
line_list = line.split()
|
||||
output_line['device'] = line_list[1]
|
||||
output_line['inode'] = line_list[3]
|
||||
output_line['links'] = line_list[5]
|
||||
continue
|
||||
# line #2
|
||||
if line.find('Size:') == 2:
|
||||
line_list = line.split(maxsplit=7)
|
||||
output_line['size'] = line_list[1]
|
||||
output_line['blocks'] = line_list[3]
|
||||
output_line['io_blocks'] = line_list[6]
|
||||
output_line['type'] = line_list[7]
|
||||
continue
|
||||
|
||||
# line #4
|
||||
if line.find('Access: (') == 0:
|
||||
line = line.replace('(', ' ').replace(')', ' ').replace('/', ' ')
|
||||
line_list = line.split()
|
||||
output_line['access'] = line_list[1]
|
||||
output_line['flags'] = line_list[2]
|
||||
output_line['uid'] = line_list[4]
|
||||
output_line['user'] = line_list[5]
|
||||
output_line['gid'] = line_list[7]
|
||||
output_line['group'] = line_list[8]
|
||||
continue
|
||||
# line #3
|
||||
if line.startswith('Device:'):
|
||||
line_list = line.split()
|
||||
output_line['device'] = line_list[1]
|
||||
output_line['inode'] = line_list[3]
|
||||
output_line['links'] = line_list[5]
|
||||
continue
|
||||
|
||||
# line #5
|
||||
if line.find('Access: 2') == 0:
|
||||
line_list = line.split(maxsplit=1)
|
||||
output_line['access_time'] = line_list[1]
|
||||
continue
|
||||
# line #4
|
||||
if line.startswith('Access: ('):
|
||||
line = line.replace('(', ' ').replace(')', ' ').replace('/', ' ')
|
||||
line_list = line.split()
|
||||
output_line['access'] = line_list[1]
|
||||
output_line['flags'] = line_list[2]
|
||||
output_line['uid'] = line_list[4]
|
||||
output_line['user'] = line_list[5]
|
||||
output_line['gid'] = line_list[7]
|
||||
output_line['group'] = line_list[8]
|
||||
continue
|
||||
|
||||
# line #6
|
||||
if line.find('Modify:') == 0:
|
||||
line_list = line.split(maxsplit=1)
|
||||
output_line['modify_time'] = line_list[1]
|
||||
continue
|
||||
# line #5
|
||||
if line.startswith('Access: 2'):
|
||||
line_list = line.split(maxsplit=1)
|
||||
output_line['access_time'] = line_list[1]
|
||||
continue
|
||||
|
||||
# line #7
|
||||
if line.find('Change:') == 0:
|
||||
line_list = line.split(maxsplit=1)
|
||||
output_line['change_time'] = line_list[1]
|
||||
continue
|
||||
# line #6
|
||||
if line.startswith('Modify:'):
|
||||
line_list = line.split(maxsplit=1)
|
||||
output_line['modify_time'] = line_list[1]
|
||||
continue
|
||||
|
||||
# line #8
|
||||
if line.find('Birth:') == 1:
|
||||
line_list = line.split(maxsplit=1)
|
||||
output_line['birth_time'] = line_list[1]
|
||||
# line #7
|
||||
if line.startswith('Change:'):
|
||||
line_list = line.split(maxsplit=1)
|
||||
output_line['change_time'] = line_list[1]
|
||||
continue
|
||||
|
||||
# line #8
|
||||
if line.find('Birth:') == 1:
|
||||
line_list = line.split(maxsplit=1)
|
||||
output_line['birth_time'] = line_list[1]
|
||||
|
||||
raw_output.append(output_line)
|
||||
continue
|
||||
|
||||
# FreeBSD/OSX output
|
||||
else:
|
||||
for line in cleandata:
|
||||
value = shlex.split(line)
|
||||
output_line = {
|
||||
'file': value[15],
|
||||
'unix_device': value[0],
|
||||
'inode': value[1],
|
||||
'flags': value[2],
|
||||
'links': value[3],
|
||||
'user': value[4],
|
||||
'group': value[5],
|
||||
'rdev': value[6],
|
||||
'size': value[7],
|
||||
'access_time': value[8],
|
||||
'modify_time': value[9],
|
||||
'change_time': value[10],
|
||||
'birth_time': value[11],
|
||||
'block_size': value[12],
|
||||
'blocks': value[13],
|
||||
'unix_flags': value[14]
|
||||
}
|
||||
|
||||
raw_output.append(output_line)
|
||||
continue
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
154
jc/parsers/sysctl.py
Normal file
154
jc/parsers/sysctl.py
Normal file
@@ -0,0 +1,154 @@
|
||||
"""jc - JSON CLI output utility sysctl -a Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --sysctl as the first argument if the piped input is coming from sysctl -a
|
||||
|
||||
Note: since sysctl output is not easily parsable only a very simple key/value object
|
||||
will be output. An attempt is made to convert obvious integers and floats. If no
|
||||
conversion is desired, use the -r (raw) option.
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ sysctl | jc --sysctl -p
|
||||
{
|
||||
"user.cs_path": "/usr/bin:/bin:/usr/sbin:/sbin",
|
||||
"user.bc_base_max": 99,
|
||||
"user.bc_dim_max": 2048,
|
||||
"user.bc_scale_max": 99,
|
||||
"user.bc_string_max": 1000,
|
||||
"user.coll_weights_max": 2,
|
||||
"user.expr_nest_max": 32
|
||||
...
|
||||
}
|
||||
|
||||
$ sysctl | jc --sysctl -p -r
|
||||
{
|
||||
"user.cs_path": "/usr/bin:/bin:/usr/sbin:/sbin",
|
||||
"user.bc_base_max": "99",
|
||||
"user.bc_dim_max": "2048",
|
||||
"user.bc_scale_max": "99",
|
||||
"user.bc_string_max": "1000",
|
||||
"user.coll_weights_max": "2",
|
||||
"user.expr_nest_max": "32",
|
||||
...
|
||||
}
|
||||
"""
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
description = 'sysctl command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
# details = 'enter any other details here'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'darwin', 'freebsd']
|
||||
magic_commands = ['sysctl']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured data with the following schema:
|
||||
|
||||
{
|
||||
"foo": string/integer/float, # best guess based on value
|
||||
"bar": string/integer/float,
|
||||
"baz": string/integer/float
|
||||
}
|
||||
"""
|
||||
for key in proc_data:
|
||||
try:
|
||||
proc_data[key] = int(proc_data[key])
|
||||
except (ValueError):
|
||||
try:
|
||||
proc_data[key] = float(proc_data[key])
|
||||
except (ValueError):
|
||||
pass
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = {}
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
data = data.splitlines()
|
||||
|
||||
# linux uses = and bsd uses :
|
||||
if ' = ' in data[0]:
|
||||
delim = ' = '
|
||||
else:
|
||||
delim = ': '
|
||||
|
||||
for line in data:
|
||||
linedata = line.split(delim, maxsplit=1)
|
||||
|
||||
# bsd adds values to newlines, which need to be fixed up with this try/except block
|
||||
try:
|
||||
key = linedata[0]
|
||||
value = linedata[1]
|
||||
|
||||
# syctl -a repeats some keys on linux. Append values from repeating keys
|
||||
# to the previous key value
|
||||
if key in raw_output:
|
||||
existing_value = raw_output[key]
|
||||
raw_output[key] = existing_value + '\n' + value
|
||||
continue
|
||||
|
||||
# fix for weird multiline output in bsd
|
||||
# if the key looks strange (has spaces or no dots) then it's probably a value field
|
||||
# on a separate line. in this case, just append it to the previous key in the dictionary.
|
||||
if '.' not in key or ' ' in key:
|
||||
previous_key = [*raw_output.keys()][-1]
|
||||
raw_output[previous_key] = raw_output[previous_key] + '\n' + line
|
||||
continue
|
||||
|
||||
# if the key looks normal then just add to the dictionary as normal
|
||||
else:
|
||||
raw_output[key] = value
|
||||
continue
|
||||
|
||||
# if there is an IndexError exception, then there was no delimiter in the line.
|
||||
# In this case just append the data line as a value to the previous key.
|
||||
except IndexError:
|
||||
prior_key = [*raw_output.keys()][-1]
|
||||
raw_output[prior_key] = raw_output[prior_key] + '\n' + line
|
||||
continue
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
@@ -40,7 +40,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.3'
|
||||
description = 'systemctl command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -96,27 +96,30 @@ def parse(data, raw=False, quiet=False):
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
linedata = data.splitlines()
|
||||
# Clear any blank lines
|
||||
linedata = list(filter(None, linedata))
|
||||
# clean up non-ascii characters, if any
|
||||
cleandata = []
|
||||
for entry in linedata:
|
||||
cleandata.append(entry.encode('ascii', errors='ignore').decode())
|
||||
|
||||
header_text = cleandata[0]
|
||||
header_list = header_text.lower().split()
|
||||
|
||||
linedata = list(filter(None, data.splitlines()))
|
||||
raw_output = []
|
||||
|
||||
for entry in cleandata[1:]:
|
||||
if entry.find('LOAD = ') != -1:
|
||||
break
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
else:
|
||||
entry_list = entry.rstrip().split(maxsplit=4)
|
||||
output_line = dict(zip(header_list, entry_list))
|
||||
raw_output.append(output_line)
|
||||
# clean up non-ascii characters, if any
|
||||
cleandata = []
|
||||
for entry in linedata:
|
||||
cleandata.append(entry.encode('ascii', errors='ignore').decode())
|
||||
|
||||
header_text = cleandata[0]
|
||||
header_list = header_text.lower().split()
|
||||
|
||||
raw_output = []
|
||||
|
||||
for entry in cleandata[1:]:
|
||||
if 'LOAD = ' in entry:
|
||||
break
|
||||
|
||||
else:
|
||||
entry_list = entry.rstrip().split(maxsplit=4)
|
||||
output_line = dict(zip(header_list, entry_list))
|
||||
raw_output.append(output_line)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -59,7 +59,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.3'
|
||||
description = 'systemctl list-jobs command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -122,28 +122,32 @@ def parse(data, raw=False, quiet=False):
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
linedata = data.splitlines()
|
||||
# Clear any blank lines
|
||||
linedata = list(filter(None, linedata))
|
||||
# clean up non-ascii characters, if any
|
||||
cleandata = []
|
||||
for entry in linedata:
|
||||
cleandata.append(entry.encode('ascii', errors='ignore').decode())
|
||||
|
||||
header_text = cleandata[0]
|
||||
header_text = header_text.lower()
|
||||
header_list = header_text.split()
|
||||
|
||||
linedata = list(filter(None, data.splitlines()))
|
||||
raw_output = []
|
||||
|
||||
for entry in cleandata[1:]:
|
||||
if entry.find('No jobs running.') != -1 or entry.find('jobs listed.') != -1:
|
||||
break
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
else:
|
||||
entry_list = entry.split(maxsplit=4)
|
||||
output_line = dict(zip(header_list, entry_list))
|
||||
raw_output.append(output_line)
|
||||
cleandata = []
|
||||
|
||||
# clean up non-ascii characters, if any
|
||||
for entry in linedata:
|
||||
cleandata.append(entry.encode('ascii', errors='ignore').decode())
|
||||
|
||||
header_text = cleandata[0]
|
||||
header_text = header_text.lower()
|
||||
header_list = header_text.split()
|
||||
|
||||
raw_output = []
|
||||
|
||||
for entry in cleandata[1:]:
|
||||
if 'No jobs running.' in entry or 'jobs listed.' in entry:
|
||||
break
|
||||
|
||||
else:
|
||||
entry_list = entry.split(maxsplit=4)
|
||||
output_line = dict(zip(header_list, entry_list))
|
||||
raw_output.append(output_line)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -34,7 +34,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.3'
|
||||
description = 'systemctl list-sockets command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -88,27 +88,30 @@ def parse(data, raw=False, quiet=False):
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
linedata = data.splitlines()
|
||||
# Clear any blank lines
|
||||
linedata = list(filter(None, linedata))
|
||||
# clean up non-ascii characters, if any
|
||||
cleandata = []
|
||||
for entry in linedata:
|
||||
cleandata.append(entry.encode('ascii', errors='ignore').decode())
|
||||
|
||||
header_text = cleandata[0].lower()
|
||||
header_list = header_text.split()
|
||||
|
||||
linedata = list(filter(None, data.splitlines()))
|
||||
raw_output = []
|
||||
|
||||
for entry in cleandata[1:]:
|
||||
if entry.find('sockets listed.') != -1:
|
||||
break
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
else:
|
||||
entry_list = entry.rsplit(maxsplit=2)
|
||||
output_line = dict(zip(header_list, entry_list))
|
||||
raw_output.append(output_line)
|
||||
cleandata = []
|
||||
# clean up non-ascii characters, if any
|
||||
for entry in linedata:
|
||||
cleandata.append(entry.encode('ascii', errors='ignore').decode())
|
||||
|
||||
header_text = cleandata[0].lower()
|
||||
header_list = header_text.split()
|
||||
|
||||
raw_output = []
|
||||
|
||||
for entry in cleandata[1:]:
|
||||
if 'sockets listed.' in entry:
|
||||
break
|
||||
|
||||
else:
|
||||
entry_list = entry.rsplit(maxsplit=2)
|
||||
output_line = dict(zip(header_list, entry_list))
|
||||
raw_output.append(output_line)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
@@ -31,7 +31,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.3'
|
||||
description = 'systemctl list-unit-files command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -84,28 +84,31 @@ def parse(data, raw=False, quiet=False):
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
linedata = data.splitlines()
|
||||
# Clear any blank lines
|
||||
linedata = list(filter(None, linedata))
|
||||
# clean up non-ascii characters, if any
|
||||
cleandata = []
|
||||
for entry in linedata:
|
||||
cleandata.append(entry.encode('ascii', errors='ignore').decode())
|
||||
|
||||
header_text = cleandata[0]
|
||||
header_text = header_text.lower().replace('unit file', 'unit_file')
|
||||
header_list = header_text.split()
|
||||
|
||||
linedata = list(filter(None, data.splitlines()))
|
||||
raw_output = []
|
||||
|
||||
for entry in cleandata[1:]:
|
||||
if entry.find('unit files listed.') != -1:
|
||||
break
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
else:
|
||||
entry_list = entry.split(maxsplit=4)
|
||||
output_line = dict(zip(header_list, entry_list))
|
||||
raw_output.append(output_line)
|
||||
cleandata = []
|
||||
# clean up non-ascii characters, if any
|
||||
for entry in linedata:
|
||||
cleandata.append(entry.encode('ascii', errors='ignore').decode())
|
||||
|
||||
header_text = cleandata[0]
|
||||
header_text = header_text.lower().replace('unit file', 'unit_file')
|
||||
header_list = header_text.split()
|
||||
|
||||
raw_output = []
|
||||
|
||||
for entry in cleandata[1:]:
|
||||
if 'unit files listed.' in entry:
|
||||
break
|
||||
|
||||
else:
|
||||
entry_list = entry.split(maxsplit=4)
|
||||
output_line = dict(zip(header_list, entry_list))
|
||||
raw_output.append(output_line)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
124
jc/parsers/timedatectl.py
Normal file
124
jc/parsers/timedatectl.py
Normal file
@@ -0,0 +1,124 @@
|
||||
"""jc - JSON CLI output utility timedatectl Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --timedatectl as the first argument if the piped input is coming from timedatectl or timedatectl status
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
|
||||
Examples:
|
||||
|
||||
$ timedatectl | jc --timedatectl -p
|
||||
{
|
||||
"local_time": "Tue 2020-03-10 17:53:21 PDT",
|
||||
"universal_time": "Wed 2020-03-11 00:53:21 UTC",
|
||||
"rtc_time": "Wed 2020-03-11 00:53:21",
|
||||
"time_zone": "America/Los_Angeles (PDT, -0700)",
|
||||
"ntp_enabled": true,
|
||||
"ntp_synchronized": true,
|
||||
"rtc_in_local_tz": false,
|
||||
"dst_active": true
|
||||
}
|
||||
|
||||
$ timedatectl | jc --timedatectl -p -r
|
||||
{
|
||||
"local_time": "Tue 2020-03-10 17:53:21 PDT",
|
||||
"universal_time": "Wed 2020-03-11 00:53:21 UTC",
|
||||
"rtc_time": "Wed 2020-03-11 00:53:21",
|
||||
"time_zone": "America/Los_Angeles (PDT, -0700)",
|
||||
"ntp_enabled": "yes",
|
||||
"ntp_synchronized": "yes",
|
||||
"rtc_in_local_tz": "no",
|
||||
"dst_active": "yes"
|
||||
}
|
||||
"""
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
description = 'timedatectl status command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
# details = 'enter any other details here'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux']
|
||||
magic_commands = ['timedatectl', 'timedatectl status']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured data with the following schema:
|
||||
|
||||
{
|
||||
"local_time": string,
|
||||
"universal_time": string,
|
||||
"rtc_time": string,
|
||||
"time_zone": string,
|
||||
"ntp_enabled": boolean,
|
||||
"ntp_synchronized": boolean,
|
||||
"system_clock_synchronized": boolean,
|
||||
"systemd-timesyncd.service_active": boolean,
|
||||
"rtc_in_local_tz": boolean,
|
||||
"dst_active": boolean
|
||||
}
|
||||
"""
|
||||
# boolean changes
|
||||
bool_list = ['ntp_enabled', 'ntp_synchronized', 'rtc_in_local_tz', 'dst_active',
|
||||
'system_clock_synchronized', 'systemd-timesyncd.service_active']
|
||||
for key in proc_data:
|
||||
if key in bool_list:
|
||||
try:
|
||||
proc_data[key] = True if proc_data[key] == 'yes' else False
|
||||
except (ValueError):
|
||||
proc_data[key] = None
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = {}
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for line in filter(None, data.splitlines()):
|
||||
linedata = line.split(':', maxsplit=1)
|
||||
raw_output[linedata[0].strip().lower().replace(' ', '_')] = linedata[1].strip()
|
||||
|
||||
if linedata[0].strip() == 'DST active':
|
||||
break
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
251
jc/parsers/tracepath.py
Normal file
251
jc/parsers/tracepath.py
Normal file
@@ -0,0 +1,251 @@
|
||||
"""jc - JSON CLI output utility tracepath Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --tracepath as the first argument if the piped input is coming from tracepath
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux'
|
||||
|
||||
Examples:
|
||||
|
||||
$ tracepath6 3ffe:2400:0:109::2 | jc --tracepath -p
|
||||
{
|
||||
"pmtu": 1480,
|
||||
"forward_hops": 2,
|
||||
"return_hops": 2,
|
||||
"hops": [
|
||||
{
|
||||
"ttl": 1,
|
||||
"guess": true,
|
||||
"host": "[LOCALHOST]",
|
||||
"reply_ms": null,
|
||||
"pmtu": 1500,
|
||||
"asymmetric_difference": null,
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": 1,
|
||||
"guess": false,
|
||||
"host": "dust.inr.ac.ru",
|
||||
"reply_ms": 0.411,
|
||||
"pmtu": null,
|
||||
"asymmetric_difference": null,
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": 2,
|
||||
"guess": false,
|
||||
"host": "dust.inr.ac.ru",
|
||||
"reply_ms": 0.39,
|
||||
"pmtu": 1480,
|
||||
"asymmetric_difference": 1,
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": 2,
|
||||
"guess": false,
|
||||
"host": "3ffe:2400:0:109::2",
|
||||
"reply_ms": 463.514,
|
||||
"pmtu": null,
|
||||
"asymmetric_difference": null,
|
||||
"reached": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
$ tracepath6 3ffe:2400:0:109::2 | jc --tracepath -p -r
|
||||
{
|
||||
"pmtu": "1480",
|
||||
"forward_hops": "2",
|
||||
"return_hops": "2",
|
||||
"hops": [
|
||||
{
|
||||
"ttl": "1",
|
||||
"guess": true,
|
||||
"host": "[LOCALHOST]",
|
||||
"reply_ms": null,
|
||||
"pmtu": "1500",
|
||||
"asymmetric_difference": null,
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": "1",
|
||||
"guess": false,
|
||||
"host": "dust.inr.ac.ru",
|
||||
"reply_ms": "0.411",
|
||||
"pmtu": null,
|
||||
"asymmetric_difference": null,
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": "2",
|
||||
"guess": false,
|
||||
"host": "dust.inr.ac.ru",
|
||||
"reply_ms": "0.390",
|
||||
"pmtu": "1480",
|
||||
"asymmetric_difference": "1",
|
||||
"reached": false
|
||||
},
|
||||
{
|
||||
"ttl": "2",
|
||||
"guess": false,
|
||||
"host": "3ffe:2400:0:109::2",
|
||||
"reply_ms": "463.514",
|
||||
"pmtu": null,
|
||||
"asymmetric_difference": null,
|
||||
"reached": true
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
"""
|
||||
import re
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
description = 'tracepath command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux']
|
||||
magic_commands = ['tracepath', 'tracepath6']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured data with the following schema:
|
||||
|
||||
{
|
||||
"pmtu": integer,
|
||||
"forward_hops": integer,
|
||||
"return_hops": integer,
|
||||
"hops": [
|
||||
{
|
||||
"ttl": integer,
|
||||
"guess": boolean,
|
||||
"host": string,
|
||||
"reply_ms": float,
|
||||
"pmtu": integer,
|
||||
"asymmetric_difference": integer,
|
||||
"reached": boolean
|
||||
}
|
||||
]
|
||||
}
|
||||
"""
|
||||
int_list = ['pmtu', 'forward_hops', 'return_hops', 'ttl', 'asymmetric_difference']
|
||||
float_list = ['reply_ms']
|
||||
|
||||
for key, value in proc_data.items():
|
||||
for item in int_list:
|
||||
if key in int_list:
|
||||
try:
|
||||
proc_data[key] = int(proc_data[key])
|
||||
except (ValueError, TypeError):
|
||||
proc_data[key] = None
|
||||
|
||||
for item in int_list:
|
||||
if key in float_list:
|
||||
try:
|
||||
proc_data[key] = float(proc_data[key])
|
||||
except (ValueError, TypeError):
|
||||
proc_data[key] = None
|
||||
|
||||
if 'hops' in proc_data:
|
||||
for entry in proc_data['hops']:
|
||||
for key in int_list:
|
||||
if key in entry:
|
||||
try:
|
||||
entry[key] = int(entry[key])
|
||||
except (ValueError, TypeError):
|
||||
entry[key] = None
|
||||
|
||||
for key in float_list:
|
||||
if key in entry:
|
||||
try:
|
||||
entry[key] = float(entry[key])
|
||||
except (ValueError, TypeError):
|
||||
entry[key] = None
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) output preprocessed JSON if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
"""
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
RE_TTL_HOST = re.compile(r'^\s?(?P<ttl>\d+)(?P<ttl_guess>\??):\s+(?P<host>(?:no reply|\S+))') # groups: ttl, ttl_guess, host
|
||||
RE_PMTU = re.compile(r'\spmtu\s(?P<pmtu>[\d]+)') # group: pmtu
|
||||
RE_REPLY_MS = re.compile(r'\s(?P<reply_ms>\d*\.\d*)ms') # group: reply_ms
|
||||
RE_ASYMM = re.compile(r'\sasymm\s+(?P<asymm>[\d]+)') # group: asymm
|
||||
RE_REACHED = re.compile(r'\sreached')
|
||||
RE_SUMMARY = re.compile(r'\s+Resume:\s+pmtu\s+(?P<pmtu>\d+)(?:\s+hops\s+(?P<hops>\d+))?(?:\s+back\s+(?P<back>\d+))?') # groups: pmtu, hops, back
|
||||
|
||||
raw_output = {}
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
hops = []
|
||||
|
||||
for line in filter(None, data.splitlines()):
|
||||
# grab hop information
|
||||
ttl_host = re.search(RE_TTL_HOST, line)
|
||||
pmtu = re.search(RE_PMTU, line)
|
||||
reply_ms = re.search(RE_REPLY_MS, line)
|
||||
asymm = re.search(RE_ASYMM, line)
|
||||
reached = re.search(RE_REACHED, line)
|
||||
summary = re.search(RE_SUMMARY, line)
|
||||
|
||||
if ttl_host:
|
||||
hop = {
|
||||
'ttl': ttl_host.group('ttl'),
|
||||
'guess': bool(ttl_host.group('ttl_guess')),
|
||||
'host': ttl_host.group('host') if ttl_host.group('host') != 'no reply' else None,
|
||||
'reply_ms': reply_ms.group('reply_ms') if reply_ms else None,
|
||||
'pmtu': pmtu.group('pmtu') if pmtu else None,
|
||||
'asymmetric_difference': asymm.group('asymm') if asymm else None,
|
||||
'reached': bool(reached)
|
||||
}
|
||||
|
||||
hops.append(hop)
|
||||
continue
|
||||
|
||||
elif summary:
|
||||
raw_output = {
|
||||
'pmtu': summary.group('pmtu') if summary.group('pmtu') else None,
|
||||
'forward_hops': summary.group('hops') if summary.group('hops') else None,
|
||||
'return_hops': summary.group('back') if summary.group('back') else None,
|
||||
'hops': hops
|
||||
}
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return process(raw_output)
|
||||
422
jc/parsers/traceroute.py
Normal file
422
jc/parsers/traceroute.py
Normal file
@@ -0,0 +1,422 @@
|
||||
"""jc - JSON CLI output utility traceroute Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --traceroute as the first argument if the piped input is coming from traceroute
|
||||
|
||||
Note: on OSX and FreeBSD be sure to redirect STDERR to STDOUT since the header line is sent to STDERR
|
||||
e.g. $ traceroute 8.8.8.8 2>&1 | jc --traceroute
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ traceroute google.com | jc --traceroute -p
|
||||
{
|
||||
"destination_ip": "216.58.194.46",
|
||||
"destination_name": "google.com",
|
||||
"hops": [
|
||||
{
|
||||
"hop": 1,
|
||||
"probes": [
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": 198.574
|
||||
},
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": null
|
||||
},
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": 198.65
|
||||
}
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
}
|
||||
|
||||
$ traceroute google.com | jc --traceroute -p -r
|
||||
{
|
||||
"destination_ip": "216.58.194.46",
|
||||
"destination_name": "google.com",
|
||||
"hops": [
|
||||
{
|
||||
"hop": "1",
|
||||
"probes": [
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": "198.574"
|
||||
},
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": null
|
||||
},
|
||||
{
|
||||
"annotation": null,
|
||||
"asn": null,
|
||||
"ip": "216.230.231.141",
|
||||
"name": "216-230-231-141.static.houston.tx.oplink.net",
|
||||
"rtt": "198.650"
|
||||
}
|
||||
]
|
||||
},
|
||||
...
|
||||
]
|
||||
}
|
||||
"""
|
||||
import re
|
||||
from decimal import Decimal
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
description = 'traceroute command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
details = 'Using the trparse library by Luis Benitez at https://github.com/lbenitez000/trparse'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'darwin', 'freebsd']
|
||||
magic_commands = ['traceroute', 'traceroute6']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
'''
|
||||
Copyright (C) 2015 Luis Benitez
|
||||
|
||||
Parses the output of a traceroute execution into an AST (Abstract Syntax Tree).
|
||||
|
||||
The MIT License (MIT)
|
||||
|
||||
Copyright (c) 2014 Luis Benitez
|
||||
|
||||
Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
of this software and associated documentation files (the "Software"), to deal
|
||||
in the Software without restriction, including without limitation the rights
|
||||
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
|
||||
copies of the Software, and to permit persons to whom the Software is
|
||||
furnished to do so, subject to the following conditions:
|
||||
|
||||
The above copyright notice and this permission notice shall be included in all
|
||||
copies or substantial portions of the Software.
|
||||
|
||||
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
|
||||
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
|
||||
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
|
||||
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
|
||||
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
|
||||
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
|
||||
SOFTWARE.
|
||||
'''
|
||||
|
||||
RE_HEADER = re.compile(r'(\S+)\s+\((\d+\.\d+\.\d+\.\d+|[0-9a-fA-F:]+)\)')
|
||||
RE_PROBE_NAME_IP = re.compile(r'(\S+)\s+\((\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|[0-9a-fA-F:]+)\)+')
|
||||
RE_PROBE_BSD_IPV6 = re.compile(r'\b(?:[A-Fa-f0-9]{1,4}:){7}[A-Fa-f0-9]{1,4}\b')
|
||||
RE_HOP = re.compile(r'^\s*(\d+)?\s+(.+)$')
|
||||
RE_PROBE_ASN = re.compile(r'\[AS(\d+)\]')
|
||||
RE_PROBE_RTT_ANNOTATION = re.compile(r'(\d+\.?\d+)?\s+ms|(\s+\*\s+)\s*(!\S*)?')
|
||||
|
||||
|
||||
class Traceroute(object):
    """Root of the parsed traceroute tree: destination info plus Hop objects."""

    def __init__(self, dest_name, dest_ip):
        self.dest_name = dest_name  # destination hostname from the header line
        self.dest_ip = dest_ip      # destination IP address from the header line
        self.hops = []              # ordered Hop instances

    def add_hop(self, hop):
        """Append a Hop to this traceroute."""
        self.hops.append(hop)

    def __str__(self):
        header = "Traceroute for %s (%s)\n\n" % (self.dest_name, self.dest_ip)
        return header + "".join(str(hop) for hop in self.hops)
|
||||
|
||||
|
||||
class Hop(object):
    """A single hop in the traceroute: its index and the probes sent to it."""

    def __init__(self, idx):
        self.idx = idx    # Hop count, starting at 1 (usually)
        self.probes = []  # Series of Probe instances

    def add_probe(self, probe):
        """Adds a Probe instance to this hop's results.

        A probe without its own IP inherits the name/IP of the previous
        probe on the same hop (continuation columns for one responder).
        """
        if self.probes and not probe.ip:
            previous = self.probes[-1]
            probe.ip = previous.ip
            probe.name = previous.name
        self.probes.append(probe)

    def __str__(self):
        prefix = "{:>3d} ".format(self.idx)
        pad = " " * len(prefix)
        rendered = [
            str(probe) if position == 0 else pad + str(probe)
            for position, probe in enumerate(self.probes)
        ]
        return prefix + "".join(rendered) + "\n"
|
||||
|
||||
|
||||
class Probe(object):
    """One probe result for a hop: responder name/IP, ASN, RTT and annotation."""

    def __init__(self, name=None, ip=None, asn=None, rtt=None, annotation=None):
        self.name = name
        self.ip = ip
        self.asn = asn                # Autonomous System number
        self.rtt = rtt                # RTT in ms
        self.annotation = annotation  # Annotation, such as !H, !N, !X, etc

    def __str__(self):
        parts = []
        if self.asn is not None:
            parts.append("[AS{:d}] ".format(self.asn))
        if self.rtt:
            parts.append("{:s} ({:s}) {:1.3f} ms".format(self.name, self.ip, self.rtt))
        else:
            # no RTT means an unanswered probe; this intentionally discards
            # any ASN prefix, matching the original rendering
            parts = ["*"]
        if self.annotation:
            parts.append(" {:s}".format(self.annotation))
        parts.append("\n")
        return "".join(parts)
|
||||
|
||||
|
||||
def loads(data):
    """Build a Traceroute AST from raw traceroute/traceroute6 output.

    Parameters:

        data:    (string) complete traceroute output, including the
                 header line that names the destination

    Returns:

        Traceroute object containing one Hop per numbered line and one
        Probe per RTT/'*' column on that line.

    Raises:

        ParseError if a probe column is neither an RTT nor a '*'.

    NOTE(review): the first line after the header is assumed to carry a
    hop number; a continuation line appearing first would reference
    'hop' before assignment -- confirm callers always pass full output.
    """
    lines = data.splitlines()

    # Get headers
    match_dest = RE_HEADER.search(lines[0])
    dest_name = match_dest.group(1)
    dest_ip = match_dest.group(2)

    # The Traceroute node is the root of the tree
    traceroute = Traceroute(dest_name, dest_ip)

    # Parse the remaining lines, they should be only hops/probes
    for line in lines[1:]:
        # Skip empty lines
        if not line:
            continue

        hop_match = RE_HOP.match(line)

        # lines without a leading hop number are continuations of the
        # previous hop (extra probes wrapped onto a new line)
        if hop_match.group(1):
            hop_index = int(hop_match.group(1))
        else:
            hop_index = None

        if hop_index is not None:
            hop = Hop(hop_index)
            traceroute.add_hop(hop)

        hop_string = hop_match.group(2)

        probe_asn_match = RE_PROBE_ASN.search(hop_string)
        if probe_asn_match:
            probe_asn = int(probe_asn_match.group(1))
        else:
            probe_asn = None

        # prefer "name (ip)" pairs; fall back to the bare IPv6 form that
        # BSD traceroute6 emits without a hostname
        probe_name_ip_match = RE_PROBE_NAME_IP.search(hop_string)
        probe_bsd_ipv6_match = RE_PROBE_BSD_IPV6.search(hop_string)
        if probe_name_ip_match:
            probe_name = probe_name_ip_match.group(1)
            probe_ip = probe_name_ip_match.group(2)
        elif probe_bsd_ipv6_match:
            probe_name = None
            probe_ip = probe_bsd_ipv6_match.group(0)
        else:
            probe_name = None
            probe_ip = None

        probe_rtt_annotations = RE_PROBE_RTT_ANNOTATION.findall(hop_string)

        # each findall tuple is (rtt, star, annotation); exactly one of
        # rtt/star should be non-empty per probe column
        for probe_rtt_annotation in probe_rtt_annotations:
            if probe_rtt_annotation[0]:
                probe_rtt = Decimal(probe_rtt_annotation[0])
            elif probe_rtt_annotation[1]:
                probe_rtt = None
            else:
                message = f"Expected probe RTT or *. Got: '{probe_rtt_annotation[0]}'"
                raise ParseError(message)

            probe_annotation = probe_rtt_annotation[2] or None

            probe = Probe(
                name=probe_name,
                ip=probe_ip,
                asn=probe_asn,
                rtt=probe_rtt,
                annotation=probe_annotation
            )
            hop.add_probe(probe)

    return traceroute
|
||||
|
||||
|
||||
class ParseError(Exception):
    """Raised when traceroute output cannot be parsed."""
|
||||
|
||||
|
||||
########################################################################################
|
||||
|
||||
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (dictionary) raw structured data to process

    Returns:

        Dictionary. Structured data with the following schema:

        {
          "destination_ip":         string,
          "destination_name":       string,
          "hops": [
            {
              "hop":                integer,
              "probes": [
                {
                  "annotation":     string,
                  "asn":            integer,
                  "ip":             string,
                  "name":           string,
                  "rtt":            float
                }
              ]
            }
          ]
        }
    """
    int_list = ['hop', 'asn']
    float_list = ['rtt']

    def _convert_types(obj):
        # coerce schema keys in place; values that cannot be converted
        # (including None) become None, matching the schema's nullability
        for key in int_list:
            if key in obj:
                try:
                    obj[key] = int(obj[key])
                except (ValueError, TypeError):
                    obj[key] = None

        for key in float_list:
            if key in obj:
                try:
                    obj[key] = float(obj[key])
                except (ValueError, TypeError):
                    obj[key] = None

    # the same conversion applies at both levels of the tree: hop entries
    # ('hop') and their probe items ('asn', 'rtt')
    if 'hops' in proc_data:
        for entry in proc_data['hops']:
            _convert_types(entry)

            if 'probes' in entry:
                for item in entry['probes']:
                    _convert_types(item)

    return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string) text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        Dictionary. Raw or processed structured data.

    Raises:

        ParseError if the traceroute header line is missing, including the
        case where the input consists only of warning lines.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    raw_output = {}

    if jc.utils.has_data(data):

        # remove any warning lines
        cleaned_lines = [
            line for line in data.splitlines()
            if 'traceroute: Warning: ' not in line
            and 'traceroute6: Warning: ' not in line
        ]

        # check if header row exists, otherwise raise exception.
        # an empty cleaned_lines (input was all warnings) is treated the
        # same as a missing header instead of raising IndexError
        if not cleaned_lines or not cleaned_lines[0].startswith(('traceroute to ', 'traceroute6 to ')):
            raise ParseError('Traceroute header line not found. Be sure to redirect STDERR to STDOUT on some operating systems.')

        tr = loads('\n'.join(cleaned_lines))

        # flatten the Traceroute/Hop/Probe tree into plain dicts; numeric
        # fields stay strings here and are typed later in process()
        hops_list = []
        for hop in tr.hops:
            probes = [
                {
                    'annotation': probe.annotation,
                    'asn': None if probe.asn is None else str(probe.asn),
                    'ip': probe.ip,
                    'name': probe.name,
                    'rtt': None if probe.rtt is None else str(probe.rtt)
                }
                for probe in hop.probes
            ]
            hops_list.append({'hop': str(hop.idx), 'probes': probes})

        raw_output = {
            'destination_ip': tr.dest_ip,
            'destination_name': tr.dest_name,
            'hops': hops_list
        }

    if raw:
        return raw_output
    else:
        return process(raw_output)
|
||||
@@ -10,7 +10,7 @@ Limitations:
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin'
|
||||
'linux', 'darwin', 'freebsd'
|
||||
|
||||
Example:
|
||||
|
||||
@@ -30,19 +30,23 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.1'
|
||||
version = '1.4'
|
||||
description = 'uname -a command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
# compatible options: linux, darwin, cygwin, win32, aix, freebsd
|
||||
compatible = ['linux', 'darwin']
|
||||
compatible = ['linux', 'darwin', 'freebsd']
|
||||
magic_commands = ['uname']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
class ParseError(Exception):
|
||||
pass
|
||||
|
||||
|
||||
def process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
@@ -88,12 +92,16 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = {}
|
||||
split_line = data.split()
|
||||
|
||||
if len(split_line) > 1:
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# check for OSX output
|
||||
if data.startswith('Darwin'):
|
||||
parsed_line = data.split()
|
||||
|
||||
if len(parsed_line) < 5:
|
||||
raise ParseError('Could not parse uname output. Make sure to use "uname -a".')
|
||||
|
||||
raw_output['machine'] = parsed_line.pop(-1)
|
||||
raw_output['kernel_name'] = parsed_line.pop(0)
|
||||
raw_output['node_name'] = parsed_line.pop(0)
|
||||
@@ -103,6 +111,10 @@ def parse(data, raw=False, quiet=False):
|
||||
# otherwise use linux parser
|
||||
else:
|
||||
parsed_line = data.split(maxsplit=3)
|
||||
|
||||
if len(parsed_line) < 3:
|
||||
raise ParseError('Could not parse uname output. Make sure to use "uname -a".')
|
||||
|
||||
raw_output['kernel_name'] = parsed_line.pop(0)
|
||||
raw_output['node_name'] = parsed_line.pop(0)
|
||||
raw_output['kernel_release'] = parsed_line.pop(0)
|
||||
|
||||
@@ -34,7 +34,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.2'
|
||||
description = 'uptime command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -107,10 +107,10 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = {}
|
||||
|
||||
cleandata = data.splitlines()
|
||||
|
||||
if cleandata:
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
parsed_line = cleandata[0].split()
|
||||
|
||||
# allow space for odd times
|
||||
|
||||
@@ -83,7 +83,7 @@ import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.3'
|
||||
description = 'w command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -149,30 +149,40 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
cleandata = data.splitlines()[1:]
|
||||
header_text = cleandata[0].lower()
|
||||
# fixup for 'from' column that can be blank
|
||||
from_col = header_text.find('from')
|
||||
# clean up 'login@' header
|
||||
# even though @ in a key is valid json, it can make things difficult
|
||||
header_text = header_text.replace('login@', 'login_at')
|
||||
headers = [h for h in ' '.join(header_text.strip().split()).split() if h]
|
||||
|
||||
# parse lines
|
||||
raw_output = []
|
||||
if cleandata:
|
||||
for entry in cleandata[1:]:
|
||||
output_line = {}
|
||||
|
||||
# normalize data by inserting Null for missing data
|
||||
temp_line = entry.split(maxsplit=len(headers) - 1)
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# fix from column, always at column 2
|
||||
if 'from' in headers:
|
||||
if entry[from_col] in string.whitespace:
|
||||
temp_line.insert(2, '-')
|
||||
header_text = cleandata[0].lower()
|
||||
# fixup for 'from' column that can be blank
|
||||
from_col = header_text.find('from')
|
||||
# clean up 'login@' header
|
||||
# even though @ in a key is valid json, it can make things difficult
|
||||
header_text = header_text.replace('login@', 'login_at')
|
||||
headers = [h for h in ' '.join(header_text.strip().split()).split() if h]
|
||||
|
||||
output_line = dict(zip(headers, temp_line))
|
||||
raw_output.append(output_line)
|
||||
# parse lines
|
||||
raw_output = []
|
||||
if cleandata:
|
||||
for entry in cleandata[1:]:
|
||||
output_line = {}
|
||||
|
||||
# normalize data by inserting Null for missing data
|
||||
temp_line = entry.split(maxsplit=len(headers) - 1)
|
||||
|
||||
# fix from column, always at column 2
|
||||
if 'from' in headers:
|
||||
if entry[from_col] in string.whitespace:
|
||||
temp_line.insert(2, '-')
|
||||
|
||||
output_line = dict(zip(headers, temp_line))
|
||||
raw_output.append(output_line)
|
||||
|
||||
# strip whitespace from beginning and end of all string values
|
||||
for row in raw_output:
|
||||
for item in row:
|
||||
if isinstance(row[item], str):
|
||||
row[item] = row[item].strip()
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
284
jc/parsers/who.py
Normal file
284
jc/parsers/who.py
Normal file
@@ -0,0 +1,284 @@
|
||||
"""jc - JSON CLI output utility who Parser
|
||||
|
||||
Usage:
|
||||
|
||||
specify --who as the first argument if the piped input is coming from who
|
||||
|
||||
accepts any of the following who options (or no options): -aTH
|
||||
|
||||
Compatibility:
|
||||
|
||||
'linux', 'darwin', 'cygwin', 'aix', 'freebsd'
|
||||
|
||||
Examples:
|
||||
|
||||
$ who -a | jc --who -p
|
||||
[
|
||||
{
|
||||
"event": "reboot",
|
||||
"time": "Feb 7 23:31",
|
||||
"pid": 1
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "-",
|
||||
"tty": "console",
|
||||
"time": "Feb 7 23:32",
|
||||
"idle": "old",
|
||||
"pid": 105
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "+",
|
||||
"tty": "ttys000",
|
||||
"time": "Feb 13 16:44",
|
||||
"idle": ".",
|
||||
"pid": 51217,
|
||||
"comment": "term=0 exit=0"
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "?",
|
||||
"tty": "ttys003",
|
||||
"time": "Feb 28 08:59",
|
||||
"idle": "01:36",
|
||||
"pid": 41402
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "+",
|
||||
"tty": "ttys004",
|
||||
"time": "Mar 1 16:35",
|
||||
"idle": ".",
|
||||
"pid": 15679,
|
||||
"from": "192.168.1.5"
|
||||
}
|
||||
]
|
||||
|
||||
$ who -a | jc --who -p -r
|
||||
[
|
||||
{
|
||||
"event": "reboot",
|
||||
"time": "Feb 7 23:31",
|
||||
"pid": "1"
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "-",
|
||||
"tty": "console",
|
||||
"time": "Feb 7 23:32",
|
||||
"idle": "old",
|
||||
"pid": "105"
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "+",
|
||||
"tty": "ttys000",
|
||||
"time": "Feb 13 16:44",
|
||||
"idle": ".",
|
||||
"pid": "51217",
|
||||
"comment": "term=0 exit=0"
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "?",
|
||||
"tty": "ttys003",
|
||||
"time": "Feb 28 08:59",
|
||||
"idle": "01:36",
|
||||
"pid": "41402"
|
||||
},
|
||||
{
|
||||
"user": "joeuser",
|
||||
"writeable_tty": "+",
|
||||
"tty": "ttys004",
|
||||
"time": "Mar 1 16:35",
|
||||
"idle": ".",
|
||||
"pid": "15679",
|
||||
"from": "192.168.1.5"
|
||||
}
|
||||
]
|
||||
"""
|
||||
import re
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
    """Parser metadata consumed by the jc framework."""
    version = '1.1'
    description = 'who command parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    # details = 'enter any other details here'

    # compatible options: linux, darwin, cygwin, win32, aix, freebsd
    compatible = ['linux', 'darwin', 'cygwin', 'aix', 'freebsd']
    # commands that trigger this parser via jc's magic syntax
    magic_commands = ['who']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data:   (List of Dictionaries) raw structured data to process

    Returns:

        List of dictionaries. Structured data with the following schema:

        [
          {
            "user":            string,
            "event":           string,
            "writeable_tty":   string,
            "tty":             string,
            "time":            string,
            "idle":            string,
            "pid":             integer,
            "from":            string,
            "comment":         string
          }
        ]
    """
    # hoisted out of the loop: the key list is constant across entries
    int_list = ['pid']

    for entry in proc_data:
        for key in int_list:
            if key in entry:
                try:
                    entry[key] = int(entry[key])
                except ValueError:
                    # non-numeric pid text conforms to the schema as null
                    entry[key] = None

    return proc_data
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
    """
    Main text parsing function

    Parameters:

        data:        (string) text data to parse
        raw:         (boolean) output preprocessed JSON if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of dictionaries. Raw or processed structured data.
    """
    if not quiet:
        jc.utils.compatibility(__name__, info.compatible)

    raw_output = []

    # Clear any blank lines
    cleandata = list(filter(None, data.splitlines()))

    if jc.utils.has_data(data):

        for line in cleandata:
            output_line = {}
            linedata = line.split()

            # clear headers, if they exist
            # (joined comparison tolerates variable whitespace in the header)
            if ''.join(linedata[0:3]) == 'NAMELINETIME' \
               or ''.join(linedata[0:3]) == 'USERLINEWHEN':
                linedata.pop(0)
                continue

            # mac reboot line
            if linedata[0] == 'reboot':
                output_line['event'] = 'reboot'
                output_line['time'] = ' '.join(linedata[2:5])
                output_line['pid'] = linedata[6]
                raw_output.append(output_line)
                continue

            # linux reboot line
            if ''.join(linedata[0:2]) == 'systemboot':
                output_line['event'] = 'reboot'
                output_line['time'] = ' '.join(linedata[2:4])
                raw_output.append(output_line)
                continue

            # linux login line
            if linedata[0] == 'LOGIN':
                output_line['event'] = 'login'
                output_line['tty'] = linedata[1]
                output_line['time'] = ' '.join(linedata[2:4])
                output_line['pid'] = linedata[4]
                if len(linedata) > 5:
                    output_line['comment'] = ' '.join(linedata[5:])
                raw_output.append(output_line)
                continue

            # linux run-level
            if linedata[0] == 'run-level':
                output_line['event'] = ' '.join(linedata[0:2])
                output_line['time'] = ' '.join(linedata[2:4])
                raw_output.append(output_line)
                continue

            # mac run-level (ignore because not enough useful info)
            # NOTE(review): assumes at least two fields on the line; a
            # single-field line would raise IndexError -- confirm who
            # output never produces one
            if linedata[1] == 'run-level':
                continue

            # pts lines with no user information
            if linedata[0].startswith('pts/'):
                output_line['tty'] = linedata[0]
                output_line['time'] = ' '.join(linedata[1:3])
                output_line['pid'] = linedata[3]
                output_line['comment'] = ' '.join(linedata[4:])
                raw_output.append(output_line)
                continue

            # user logins: fields are consumed left-to-right with pop(0)
            output_line['user'] = linedata.pop(0)

            # optional single-character tty writeability flag
            if linedata[0] in '+-?':
                output_line['writeable_tty'] = linedata.pop(0)

            output_line['tty'] = linedata.pop(0)

            # mac: time starts with a three-letter month abbreviation and
            # spans three fields (e.g. "Feb 7 23:31")
            if re.match(r'[JFMASOND][aepuco][nbrynlgptvc]', linedata[0]):
                output_line['time'] = ' '.join([linedata.pop(0),
                                                linedata.pop(0),
                                                linedata.pop(0)])
            # linux: time spans two fields (e.g. "2020-03-01 16:35")
            else:
                output_line['time'] = ' '.join([linedata.pop(0),
                                                linedata.pop(0)])

            # if just one more field, then it's the remote IP
            if len(linedata) == 1:
                output_line['from'] = linedata[0].replace('(', '').replace(')', '')
                raw_output.append(output_line)
                continue

            # extended info: idle
            if len(linedata) > 0:
                output_line['idle'] = linedata.pop(0)

            # extended info: pid
            if len(linedata) > 0:
                output_line['pid'] = linedata.pop(0)

            # extended info is from
            if len(linedata) > 0 and linedata[0].startswith('('):
                output_line['from'] = linedata[0].replace('(', '').replace(')', '')

            # else, extended info is comment
            elif len(linedata) > 0:
                output_line['comment'] = ' '.join(linedata)

            raw_output.append(output_line)

    if raw:
        return raw_output
    else:
        return process(raw_output)
|
||||
@@ -59,7 +59,7 @@ import xmltodict
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.2'
|
||||
description = 'XML file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -111,7 +111,10 @@ def parse(data, raw=False, quiet=False):
|
||||
if not quiet:
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
if data:
|
||||
raw_output = []
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
raw_output = xmltodict.parse(data)
|
||||
|
||||
if raw:
|
||||
|
||||
@@ -71,7 +71,7 @@ from ruamel.yaml import YAML
|
||||
|
||||
|
||||
class info():
|
||||
version = '1.0'
|
||||
version = '1.1'
|
||||
description = 'YAML file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -126,10 +126,13 @@ def parse(data, raw=False, quiet=False):
|
||||
jc.utils.compatibility(__name__, info.compatible)
|
||||
|
||||
raw_output = []
|
||||
yaml = YAML(typ='safe')
|
||||
|
||||
for document in yaml.load_all(data):
|
||||
raw_output.append(document)
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
yaml = YAML(typ='safe')
|
||||
|
||||
for document in yaml.load_all(data):
|
||||
raw_output.append(document)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
|
||||
247
jc/tracebackplus.py
Normal file
247
jc/tracebackplus.py
Normal file
@@ -0,0 +1,247 @@
|
||||
"""More comprehensive traceback formatting for Python scripts.
|
||||
To enable this module, do:
|
||||
import tracebackplus; tracebackplus.enable()
|
||||
at the top of your script. The optional arguments to enable() are:
|
||||
logdir - if set, tracebacks are written to files in this directory
|
||||
context - number of lines of source code to show for each stack frame
|
||||
By default, tracebacks are displayed but not saved and the context is 5 lines.
|
||||
Alternatively, if you have caught an exception and want tracebackplus to display it
|
||||
for you, call tracebackplus.handler(). The optional argument to handler() is a
|
||||
3-item tuple (etype, evalue, etb) just like the value of sys.exc_info().
|
||||
"""
|
||||
|
||||
'''
|
||||
tracebackplus was derived from the cgitb standard library module. As cgitb is being
|
||||
deprecated, this simplified version of cgitb was created.
|
||||
|
||||
https://github.com/python/cpython/blob/3.8/Lib/cgitb.py
|
||||
|
||||
"Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
|
||||
2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
|
||||
All Rights Reserved"
|
||||
|
||||
PYTHON SOFTWARE FOUNDATION LICENSE VERSION 2
|
||||
--------------------------------------------
|
||||
|
||||
1. This LICENSE AGREEMENT is between the Python Software Foundation
|
||||
("PSF"), and the Individual or Organization ("Licensee") accessing and
|
||||
otherwise using this software ("Python") in source or binary form and
|
||||
its associated documentation.
|
||||
|
||||
2. Subject to the terms and conditions of this License Agreement, PSF hereby
|
||||
grants Licensee a nonexclusive, royalty-free, world-wide license to reproduce,
|
||||
analyze, test, perform and/or display publicly, prepare derivative works,
|
||||
distribute, and otherwise use Python alone or in any derivative version,
|
||||
provided, however, that PSF's License Agreement and PSF's notice of copyright,
|
||||
i.e., "Copyright (c) 2001, 2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010,
|
||||
2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020 Python Software Foundation;
|
||||
All Rights Reserved" are retained in Python alone or in any derivative version
|
||||
prepared by Licensee.
|
||||
|
||||
3. In the event Licensee prepares a derivative work that is based on
|
||||
or incorporates Python or any part thereof, and wants to make
|
||||
the derivative work available to others as provided herein, then
|
||||
Licensee hereby agrees to include in any such work a brief summary of
|
||||
the changes made to Python.
|
||||
|
||||
4. PSF is making Python available to Licensee on an "AS IS"
|
||||
basis. PSF MAKES NO REPRESENTATIONS OR WARRANTIES, EXPRESS OR
|
||||
IMPLIED. BY WAY OF EXAMPLE, BUT NOT LIMITATION, PSF MAKES NO AND
|
||||
DISCLAIMS ANY REPRESENTATION OR WARRANTY OF MERCHANTABILITY OR FITNESS
|
||||
FOR ANY PARTICULAR PURPOSE OR THAT THE USE OF PYTHON WILL NOT
|
||||
INFRINGE ANY THIRD PARTY RIGHTS.
|
||||
|
||||
5. PSF SHALL NOT BE LIABLE TO LICENSEE OR ANY OTHER USERS OF PYTHON
|
||||
FOR ANY INCIDENTAL, SPECIAL, OR CONSEQUENTIAL DAMAGES OR LOSS AS
|
||||
A RESULT OF MODIFYING, DISTRIBUTING, OR OTHERWISE USING PYTHON,
|
||||
OR ANY DERIVATIVE THEREOF, EVEN IF ADVISED OF THE POSSIBILITY THEREOF.
|
||||
|
||||
6. This License Agreement will automatically terminate upon a material
|
||||
breach of its terms and conditions.
|
||||
|
||||
7. Nothing in this License Agreement shall be deemed to create any
|
||||
relationship of agency, partnership, or joint venture between PSF and
|
||||
Licensee. This License Agreement does not grant permission to use PSF
|
||||
trademarks or trade name in a trademark sense to endorse or promote
|
||||
products or services of Licensee, or any third party.
|
||||
|
||||
8. By copying, installing or otherwise using Python, Licensee
|
||||
agrees to be bound by the terms and conditions of this License
|
||||
Agreement.
|
||||
'''
|
||||
|
||||
import inspect
|
||||
import keyword
|
||||
import linecache
|
||||
import os
|
||||
import pydoc
|
||||
import sys
|
||||
import tempfile
|
||||
import time
|
||||
import tokenize
|
||||
import traceback
|
||||
|
||||
|
||||
__UNDEF__ = [] # a special sentinel object
|
||||
|
||||
|
||||
def lookup(name, frame, locals):
    """Find the value for a given name in the given environment.

    Returns a (scope, value) pair where scope is 'local', 'global',
    'builtin', or None, and value is the sentinel __UNDEF__ when the
    name is unbound.
    """
    if name in locals:
        return 'local', locals[name]

    module_globals = frame.f_globals
    if name in module_globals:
        return 'global', module_globals[name]

    if '__builtins__' in module_globals:
        builtins = module_globals['__builtins__']
        # __builtins__ may be either a dict or a module object
        if isinstance(builtins, dict):
            if name in builtins:
                return 'builtin', builtins[name]
        elif hasattr(builtins, name):
            return 'builtin', getattr(builtins, name)

    return None, __UNDEF__
|
||||
|
||||
|
||||
def scanvars(reader, frame, locals):
    """Scan one logical line of Python and look up values of variables used.

    Returns a list of (name, where, value) tuples; dotted attribute
    chains are reported with the full dotted name and the prefix as
    'where'. Scanning stops at the first logical NEWLINE token.
    """
    vars, lasttoken, parent, prefix, value = [], None, None, '', __UNDEF__
    for ttype, token, start, end, line in tokenize.generate_tokens(reader):
        if ttype == tokenize.NEWLINE:
            break
        if ttype == tokenize.NAME and token not in keyword.kwlist:
            if lasttoken == '.':
                # attribute access: resolve against the previously seen
                # object rather than the frame's scopes
                if parent is not __UNDEF__:
                    value = getattr(parent, token, __UNDEF__)
                vars.append((prefix + token, prefix, value))
            else:
                where, value = lookup(token, frame, locals)
                vars.append((token, where, value))
        elif token == '.':
            # extend the dotted prefix and remember the object whose
            # attribute is being accessed next
            prefix += lasttoken + '.'
            parent = value
        else:
            # any other token breaks the dotted chain
            parent, prefix = None, ''
        lasttoken = token
    return vars
|
||||
|
||||
|
||||
def text(einfo, context=5):
    """Return a plain text document describing a given traceback.

    Parameters:

        einfo:     (tuple) (etype, evalue, etb) as returned by sys.exc_info()
        context:   (integer) number of source lines to show per stack frame

    Returns:

        String containing a header, one section per stack frame (source
        context plus the values of variables referenced on the failing
        line), the exception's attributes, and the standard traceback.
    """
    etype, evalue, etb = einfo
    if isinstance(etype, type):
        etype = etype.__name__
    pyver = 'Python ' + sys.version.split()[0] + ': ' + sys.executable
    date = time.ctime(time.time())
    head = '%s\n%s\n%s\n' % (str(etype), pyver, date) + '''
A problem occurred in a Python script. Here is the sequence of
function calls leading up to the error, in the order they occurred.
'''

    frames = []
    records = inspect.getinnerframes(etb, context)
    for frame, file, lnum, func, lines, index in records:
        file = file and os.path.abspath(file) or '?'
        args, varargs, varkw, locals = inspect.getargvalues(frame)
        call = ''
        if func != '?':
            call = 'in ' + func + \
                inspect.formatargvalues(args, varargs, varkw, locals,
                    formatvalue=lambda value: '=' + pydoc.text.repr(value))

        highlight = {}

        # reader() feeds scanvars one source line at a time, advancing a
        # mutable line counter captured via the default-argument list
        def reader(lnum=[lnum]):
            highlight[lnum[0]] = 1
            try:
                return linecache.getline(file, lnum[0])
            finally:
                lnum[0] += 1
        vars = scanvars(reader, frame, locals)

        rows = [' %s %s' % (file, call)]
        if index is not None:
            # number the context lines so the failing line is identifiable
            i = lnum - index
            for line in lines:
                num = '%5d ' % i
                rows.append(num + line.rstrip())
                i += 1

        # dump each referenced variable once, labeling non-local scopes
        done, dump = {}, []
        for name, where, value in vars:
            if name in done:
                continue
            done[name] = 1
            if value is not __UNDEF__:
                if where == 'global':
                    name = 'global ' + name
                elif where != 'local':
                    name = where + name.split('.')[-1]
                dump.append('%s = %s' % (name, pydoc.text.repr(value)))
            else:
                dump.append(name + ' undefined')

        rows.append('\n'.join(dump))
        frames.append('\n%s\n' % '\n'.join(rows))

    # append the exception's own attributes (e.g. args, errno)
    exception = ['%s: %s' % (str(etype), str(evalue))]
    for name in dir(evalue):
        value = pydoc.text.repr(getattr(evalue, name))
        exception.append('\n%s%s = %s' % (' ' * 4, name, value))

    return head + ''.join(frames) + ''.join(exception) + '''

The above is a description of an error in a Python program. Here is
the original traceback:

%s
''' % ''.join(traceback.format_exception(etype, evalue, etb))
|
||||
|
||||
|
||||
class Hook:
    """A hook to replace sys.excepthook"""

    def __init__(self, logdir=None, context=5, file=None):
        self.logdir = logdir    # log tracebacks to files if not None
        self.context = context  # number of source code lines per frame
        self.file = file or sys.stdout  # place to send the output

    def __call__(self, etype, evalue, etb):
        """sys.excepthook-compatible entry point."""
        self.handle((etype, evalue, etb))

    def handle(self, info=None):
        """Format and emit the given (or current) exception info.

        Writes the verbose traceback to self.file and, when logdir is
        set, also saves it to a unique .txt file in that directory.
        """
        info = info or sys.exc_info()

        formatter = text

        try:
            doc = formatter(info, self.context)
        except:    # just in case something goes wrong
            # fall back to the stdlib rendering rather than losing the
            # traceback entirely
            doc = ''.join(traceback.format_exception(*info))

        self.file.write(doc + '\n')

        if self.logdir is not None:
            suffix = '.txt'
            (fd, path) = tempfile.mkstemp(suffix=suffix, dir=self.logdir)

            try:
                with os.fdopen(fd, 'w') as file:
                    file.write(doc)
                msg = '%s contains the description of this error.' % path
            except:
                msg = 'Tried to save traceback to %s, but failed.' % path

            self.file.write(msg + '\n')

        # best-effort flush: the output stream may already be closed
        # while handling an interpreter-shutdown exception
        try:
            self.file.flush()
        except:
            pass
|
||||
|
||||
|
||||
handler = Hook().handle
|
||||
|
||||
|
||||
def enable(logdir=None, context=5):
    """Install an exception handler that sends verbose tracebacks to STDOUT.

    Parameters:

        logdir:    (string) if set, each traceback is also written to a
                   file in this directory
        context:   (integer) number of source lines to show per stack frame
    """
    # replaces sys.excepthook process-wide for all uncaught exceptions
    sys.excepthook = Hook(logdir=logdir, context=context)
|
||||
27
jc/utils.py
27
jc/utils.py
@@ -56,7 +56,30 @@ def compatibility(mod_name, compatible):
|
||||
|
||||
no return, just prints output to STDERR
|
||||
"""
|
||||
if sys.platform not in compatible:
|
||||
platform_found = False
|
||||
|
||||
for platform in compatible:
|
||||
if sys.platform.startswith(platform):
|
||||
platform_found = True
|
||||
break
|
||||
|
||||
if not platform_found:
|
||||
mod = mod_name.split('.')[-1]
|
||||
compat_list = ', '.join(compatible)
|
||||
warning_message(f'{mod} parser not compatible with your OS ({sys.platform}).\n Compatible platforms: {compat_list}')
|
||||
warning_message(f'{mod} parser not compatible with your OS ({sys.platform}).\n'
|
||||
f' Compatible platforms: {compat_list}')
|
||||
|
||||
|
||||
def has_data(data):
|
||||
"""
|
||||
Checks if the input contains data. If there are any non-whitespace characters then return True, else return False
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) input to check whether it contains data
|
||||
|
||||
Returns:
|
||||
|
||||
Boolean True if input string (data) contains non-whitespace characters, otherwise False
|
||||
"""
|
||||
return True if data and not data.isspace() else False
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user