mirror of
https://github.com/kellyjonbrazil/jc.git
synced 2026-04-07 17:57:03 +02:00
Compare commits
124 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
2bccd14c5d | ||
|
|
0d4823c9de | ||
|
|
2a14f56b72 | ||
|
|
fe49759598 | ||
|
|
ee737a59eb | ||
|
|
517ab10930 | ||
|
|
604fb574be | ||
|
|
a254ee8d88 | ||
|
|
f784a7a76d | ||
|
|
2e33afbe18 | ||
|
|
103bb174fc | ||
|
|
2a76a64fa1 | ||
|
|
c8fb56c601 | ||
|
|
e835227027 | ||
|
|
88ffcaee56 | ||
|
|
a9ba98847c | ||
|
|
2630049ab7 | ||
|
|
47c7e081f3 | ||
|
|
ef7f755614 | ||
|
|
32bd7ffbf6 | ||
|
|
347097a294 | ||
|
|
356857f5d6 | ||
|
|
ee12c52291 | ||
|
|
0e7ebf4dc1 | ||
|
|
f1e0cec9d6 | ||
|
|
d96a2a8623 | ||
|
|
1b1bc46222 | ||
|
|
d5a8b4eed2 | ||
|
|
5ddd4f0e86 | ||
|
|
8b94c326de | ||
|
|
29b012e66d | ||
|
|
572a3207cd | ||
|
|
2a88f2be6b | ||
|
|
3de6eac1ad | ||
|
|
f44260603e | ||
|
|
1c60f5355e | ||
|
|
40fa78a966 | ||
|
|
71db67ef49 | ||
|
|
1cd723b48f | ||
|
|
82ee4d7b30 | ||
|
|
dfd19f38f3 | ||
|
|
2358c883d0 | ||
|
|
79e4f3a761 | ||
|
|
5be45622cc | ||
|
|
5f4136b943 | ||
|
|
941bfe2724 | ||
|
|
3ed44a26d9 | ||
|
|
b7270517bd | ||
|
|
bf63ac93c6 | ||
|
|
1cb80f15c2 | ||
|
|
b5c22c6e53 | ||
|
|
7951366117 | ||
|
|
c78a4bb655 | ||
|
|
8aceda18b9 | ||
|
|
e0c75a9b6b | ||
|
|
fd283f6cf7 | ||
|
|
b881ad4ec0 | ||
|
|
2fcb32e26f | ||
|
|
13a802225b | ||
|
|
88649a4e8d | ||
|
|
5c6fa5bff6 | ||
|
|
b70025d6d6 | ||
|
|
59b89ecbd4 | ||
|
|
8f7502ff0f | ||
|
|
249d93f15c | ||
|
|
b0cf2e2d78 | ||
|
|
264fcd40ad | ||
|
|
54def8ef49 | ||
|
|
63c271b837 | ||
|
|
741b2d1c1d | ||
|
|
47d4335890 | ||
|
|
81f721f1ab | ||
|
|
c4e1068895 | ||
|
|
a77bb4165a | ||
|
|
3cd2dce496 | ||
|
|
46a8978740 | ||
|
|
3161c48939 | ||
|
|
a89a9187f8 | ||
|
|
d9e0aa5b93 | ||
|
|
d298e101e9 | ||
|
|
cea975d7f1 | ||
|
|
1ed69f9e6a | ||
|
|
ab0e05ec82 | ||
|
|
c16cce4bf0 | ||
|
|
d3489536a1 | ||
|
|
041050ce28 | ||
|
|
7de1a8a5d6 | ||
|
|
d4604743d1 | ||
|
|
0b8fb31298 | ||
|
|
dcdd79e28c | ||
|
|
5291baeb8e | ||
|
|
6867102c66 | ||
|
|
36ed2c7e2e | ||
|
|
4ab0aba9d3 | ||
|
|
e643badaf7 | ||
|
|
d96e96219e | ||
|
|
e42af3353e | ||
|
|
4ec2b16f42 | ||
|
|
0a028456bf | ||
|
|
a1f10928e1 | ||
|
|
eae1d4b89a | ||
|
|
d3c7cec333 | ||
|
|
36fa08d711 | ||
|
|
a9958841e4 | ||
|
|
504ad81a01 | ||
|
|
8bf2f4f4d0 | ||
|
|
805397ea18 | ||
|
|
1b3985c2d7 | ||
|
|
f602043642 | ||
|
|
1a1aa8fda3 | ||
|
|
3249a017ae | ||
|
|
84f0246b2d | ||
|
|
1c795982b0 | ||
|
|
c5164b4108 | ||
|
|
dc3716ecb3 | ||
|
|
c5165ccc21 | ||
|
|
5b2035e0e6 | ||
|
|
5205154aaf | ||
|
|
f500de3af6 | ||
|
|
4b028b5080 | ||
|
|
4cd721be85 | ||
|
|
d58ca402a7 | ||
|
|
5386879040 | ||
|
|
f19a1f23a9 |
4
.github/workflows/pythonapp.yml
vendored
4
.github/workflows/pythonapp.yml
vendored
@@ -14,12 +14,12 @@ jobs:
|
||||
strategy:
|
||||
matrix:
|
||||
os: [macos-latest, ubuntu-20.04, windows-latest]
|
||||
python-version: ["3.6", "3.7", "3.8", "3.9", "3.10", "3.11"]
|
||||
python-version: ["3.6", "3.7", "3.8", "3.9", "3.10", "3.11", "3.12"]
|
||||
|
||||
steps:
|
||||
- uses: actions/checkout@v3
|
||||
- name: "Set up timezone to America/Los_Angeles"
|
||||
uses: szenius/set-timezone@v1.0
|
||||
uses: szenius/set-timezone@v1.2
|
||||
with:
|
||||
timezoneLinux: "America/Los_Angeles"
|
||||
timezoneMacos: "America/Los_Angeles"
|
||||
|
||||
1
.gitignore
vendored
1
.gitignore
vendored
@@ -6,3 +6,4 @@ build/
|
||||
.github/
|
||||
.vscode/
|
||||
_config.yml
|
||||
.venv
|
||||
|
||||
44
CHANGELOG
44
CHANGELOG
@@ -1,5 +1,48 @@
|
||||
jc changelog
|
||||
|
||||
20231216 v1.24.0
|
||||
- Add `debconf-show` command parser
|
||||
- Add `iftop` command parser
|
||||
- Add `pkg-index-apk` parser for Alpine Linux Package Index files
|
||||
- Add `pkg-index-deb` parser for Debian/Ubuntu Package Index files
|
||||
- Add `proc-cmdline` parser for `/proc/cmdline` file
|
||||
- Add `swapon` command parser
|
||||
- Add `tune2fs` command parser
|
||||
- Remove `iso-datetime` parser deprecated since v1.22.1. (use `datetime-iso` instead)
|
||||
- Update timezone change in Github Actions for node v16 requirement
|
||||
- Add Python 3.12 tests to Github Actions
|
||||
- Refactor `acpi` command parser for code cleanup
|
||||
- Refactor vendored libraries to remove Python 2 support
|
||||
- Fix `iptables` parser for cases where the `target` field is blank in a rule
|
||||
- Fix `vmstat` parsers for some cases where wide output is used
|
||||
- Fix `mount` parser for cases with spaces in the mount point name
|
||||
- Fix `xrandr` parser for infinite loop issues
|
||||
|
||||
20231023 v1.23.6
|
||||
- Fix XML parser for xmltodict library versions < 0.13.0
|
||||
- Fix `who` command parser for cases when the from field contains spaces
|
||||
|
||||
20231021 v1.23.5
|
||||
- Add `host` command parser
|
||||
- Add `nsd-control` command parser
|
||||
- Add `lsb_release` command parser
|
||||
- Add `/etc/os-release` file parser
|
||||
- Enhance `env` command parser to support multi-line values
|
||||
- Enhance `ping` and `ping-s` parsers to add error and corrupted support
|
||||
- Enhance `xml` parser to include comments in the JSON output
|
||||
- Fix `pidstat` command parser when using `-T ALL`
|
||||
- Fix `x509-cert` parser to allow negative serial numbers
|
||||
- Fix `x509-cert` parser for cases when bitstrings are larger than standard
|
||||
- Fix `xrandr` command parser for associated device issues
|
||||
- Fix error when pygments library is not installed
|
||||
|
||||
20230730 v1.23.4
|
||||
- Add `/etc/resolve.conf` file parser
|
||||
- Add `/proc/net/tcp` and `/proc/net/tcp6` file parser
|
||||
- Add `find` command parser
|
||||
- Add `ip route` command parser
|
||||
- Fix `certbot` command parser to be more robust with different line endings
|
||||
|
||||
20230621 v1.23.3
|
||||
- Add `lsattr` command parser
|
||||
- Add `srt` file parser
|
||||
@@ -13,7 +56,6 @@ jc changelog
|
||||
- Enhance `ss` command parser to support extended options
|
||||
- Enhance the compatibility warning message
|
||||
- Fix `bluetoothctl` command parser for some mouse devices
|
||||
- Fix `certbot` command parser to be more robust with different line endings
|
||||
- Fix `ping` command parsers for output with missing hostname
|
||||
- Fix `stat` command parser for older versions that may not contain all fields
|
||||
- Fix deprecated option in `setup.cfg`
|
||||
|
||||
13
README.md
13
README.md
@@ -120,6 +120,7 @@ pip3 install jc
|
||||
| NixOS linux | `nix-env -iA nixpkgs.jc` or `nix-env -iA nixos.jc` |
|
||||
| Guix System linux | `guix install jc` |
|
||||
| Gentoo Linux | `emerge dev-python/jc` |
|
||||
| Photon linux | `tdnf install jc` |
|
||||
| macOS | `brew install jc` |
|
||||
| FreeBSD | `portsnap fetch update && cd /usr/ports/textproc/py-jc && make install clean` |
|
||||
| Ansible filter plugin | `ansible-galaxy collection install community.general` |
|
||||
@@ -178,6 +179,7 @@ option.
|
||||
| `--csv-s` | CSV file streaming parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/csv_s) |
|
||||
| `--date` | `date` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/date) |
|
||||
| `--datetime-iso` | ISO 8601 Datetime string parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/datetime_iso) |
|
||||
| `--debconf-show` | `debconf-show` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/debconf_show) |
|
||||
| `--df` | `df` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/df) |
|
||||
| `--dig` | `dig` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/dig) |
|
||||
| `--dir` | `dir` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/dir) |
|
||||
@@ -187,6 +189,7 @@ option.
|
||||
| `--email-address` | Email Address string parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/email_address) |
|
||||
| `--env` | `env` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/env) |
|
||||
| `--file` | `file` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/file) |
|
||||
| `--find` | `find` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/find) |
|
||||
| `--findmnt` | `findmnt` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/findmnt) |
|
||||
| `--finger` | `finger` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/finger) |
|
||||
| `--free` | `free` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/free) |
|
||||
@@ -201,6 +204,7 @@ option.
|
||||
| `--hashsum` | hashsum command parser (`md5sum`, `shasum`, etc.) | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/hashsum) |
|
||||
| `--hciconfig` | `hciconfig` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/hciconfig) |
|
||||
| `--history` | `history` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/history) |
|
||||
| `--host` | `host` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/host) |
|
||||
| `--hosts` | `/etc/hosts` file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/hosts) |
|
||||
| `--id` | `id` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/id) |
|
||||
| `--ifconfig` | `ifconfig` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/ifconfig) |
|
||||
@@ -210,6 +214,7 @@ option.
|
||||
| `--iostat-s` | `iostat` command streaming parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/iostat_s) |
|
||||
| `--ip-address` | IPv4 and IPv6 Address string parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/ip_address) |
|
||||
| `--iptables` | `iptables` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/iptables) |
|
||||
| `--ip-route` | `ip route` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/ip_route) |
|
||||
| `--iw-scan` | `iw dev [device] scan` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/iw_scan) |
|
||||
| `--iwconfig` | `iwconfig` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/iwconfig) |
|
||||
| `--jar-manifest` | Java MANIFEST.MF file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/jar_manifest) |
|
||||
@@ -220,6 +225,7 @@ option.
|
||||
| `--ls` | `ls` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/ls) |
|
||||
| `--ls-s` | `ls` command streaming parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/ls_s) |
|
||||
| `--lsattr` | `lsattr` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/lsattr) |
|
||||
| `--lsb-release` | `lsb_release` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/lsb_release) |
|
||||
| `--lsblk` | `lsblk` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/lsblk) |
|
||||
| `--lsmod` | `lsmod` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/lsmod) |
|
||||
| `--lsof` | `lsof` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/lsof) |
|
||||
@@ -232,9 +238,11 @@ option.
|
||||
| `--mpstat-s` | `mpstat` command streaming parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/mpstat_s) |
|
||||
| `--netstat` | `netstat` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/netstat) |
|
||||
| `--nmcli` | `nmcli` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/nmcli) |
|
||||
| `--nsd-control` | `nsd-control` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/nsd_control) |
|
||||
| `--ntpq` | `ntpq -p` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/ntpq) |
|
||||
| `--openvpn` | openvpn-status.log file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/openvpn) |
|
||||
| `--os-prober` | `os-prober` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/os_prober) |
|
||||
| `--os-release` | `/etc/os-release` file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/os_release) |
|
||||
| `--passwd` | `/etc/passwd` file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/passwd) |
|
||||
| `--pci-ids` | `pci.ids` file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/pci_ids) |
|
||||
| `--pgpass` | PostgreSQL password file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/pgpass) |
|
||||
@@ -244,10 +252,13 @@ option.
|
||||
| `--ping-s` | `ping` and `ping6` command streaming parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/ping_s) |
|
||||
| `--pip-list` | `pip list` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/pip_list) |
|
||||
| `--pip-show` | `pip show` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/pip_show) |
|
||||
| `--pkg-index-apk` | Alpine Linux Package Index file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/pkg_index_apk) |
|
||||
| `--pkg-index-deb` | Debian Package Index file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/pkg_index_deb) |
|
||||
| `--plist` | PLIST file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/plist) |
|
||||
| `--postconf` | `postconf -M` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/postconf) |
|
||||
| `--proc` | `/proc/` file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/proc) |
|
||||
| `--ps` | `ps` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/ps) |
|
||||
| `--resolve-conf` | `/etc/resolve.conf` file parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/resolve_conf) |
|
||||
| `--route` | `route` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/route) |
|
||||
| `--rpm-qi` | `rpm -qi` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/rpm_qi) |
|
||||
| `--rsync` | `rsync` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/rsync) |
|
||||
@@ -261,6 +272,7 @@ option.
|
||||
| `--sshd-conf` | `sshd` config file and `sshd -T` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/sshd_conf) |
|
||||
| `--stat` | `stat` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/stat) |
|
||||
| `--stat-s` | `stat` command streaming parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/stat_s) |
|
||||
| `--swapon` | `swapon` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/swapon) |
|
||||
| `--sysctl` | `sysctl` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/sysctl) |
|
||||
| `--syslog` | Syslog RFC 5424 string parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/syslog) |
|
||||
| `--syslog-s` | Syslog RFC 5424 string streaming parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/syslog_s) |
|
||||
@@ -279,6 +291,7 @@ option.
|
||||
| `--top-s` | `top -b` command streaming parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/top_s) |
|
||||
| `--tracepath` | `tracepath` and `tracepath6` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/tracepath) |
|
||||
| `--traceroute` | `traceroute` and `traceroute6` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/traceroute) |
|
||||
| `--tune2fs` | `tune2fs -l` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/tune2fs) |
|
||||
| `--udevadm` | `udevadm info` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/udevadm) |
|
||||
| `--ufw` | `ufw status` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/ufw) |
|
||||
| `--ufw-appinfo` | `ufw app info [application]` command parser | [details](https://kellyjonbrazil.github.io/jc/docs/parsers/ufw_appinfo) |
|
||||
|
||||
@@ -3,8 +3,8 @@ _jc()
|
||||
local cur prev words cword jc_commands jc_parsers jc_options \
|
||||
jc_about_options jc_about_mod_options jc_help_options jc_special_options
|
||||
|
||||
jc_commands=(acpi airport arp blkid bluetoothctl cbt certbot chage cksum crontab date df dig dmidecode dpkg du env file findmnt finger free git gpg hciconfig id ifconfig iostat iptables iw iwconfig jobs last lastb ls lsattr lsblk lsmod lsof lspci lsusb md5 md5sum mdadm mount mpstat netstat nmcli ntpq os-prober pidstat ping ping6 pip pip3 postconf printenv ps route rpm rsync sfdisk sha1sum sha224sum sha256sum sha384sum sha512sum shasum ss ssh sshd stat sum sysctl systemctl systeminfo timedatectl top tracepath tracepath6 traceroute traceroute6 udevadm ufw uname update-alternatives upower uptime vdir veracrypt vmstat w wc who xrandr zipinfo zpool)
|
||||
jc_parsers=(--acpi --airport --airport-s --arp --asciitable --asciitable-m --blkid --bluetoothctl --cbt --cef --cef-s --certbot --chage --cksum --clf --clf-s --crontab --crontab-u --csv --csv-s --date --datetime-iso --df --dig --dir --dmidecode --dpkg-l --du --email-address --env --file --findmnt --finger --free --fstab --git-log --git-log-s --git-ls-remote --gpg --group --gshadow --hash --hashsum --hciconfig --history --hosts --id --ifconfig --ini --ini-dup --iostat --iostat-s --ip-address --iptables --iw-scan --iwconfig --jar-manifest --jobs --jwt --kv --last --ls --ls-s --lsattr --lsblk --lsmod --lsof --lspci --lsusb --m3u --mdadm --mount --mpstat --mpstat-s --netstat --nmcli --ntpq --openvpn --os-prober --passwd --pci-ids --pgpass --pidstat --pidstat-s --ping --ping-s --pip-list --pip-show --plist --postconf --proc --proc-buddyinfo --proc-consoles --proc-cpuinfo --proc-crypto --proc-devices --proc-diskstats --proc-filesystems --proc-interrupts --proc-iomem --proc-ioports --proc-loadavg --proc-locks --proc-meminfo --proc-modules --proc-mtrr --proc-pagetypeinfo --proc-partitions --proc-slabinfo --proc-softirqs --proc-stat --proc-swaps --proc-uptime --proc-version --proc-vmallocinfo --proc-vmstat --proc-zoneinfo --proc-driver-rtc --proc-net-arp --proc-net-dev --proc-net-dev-mcast --proc-net-if-inet6 --proc-net-igmp --proc-net-igmp6 --proc-net-ipv6-route --proc-net-netlink --proc-net-netstat --proc-net-packet --proc-net-protocols --proc-net-route --proc-net-unix --proc-pid-fdinfo --proc-pid-io --proc-pid-maps --proc-pid-mountinfo --proc-pid-numa-maps --proc-pid-smaps --proc-pid-stat --proc-pid-statm --proc-pid-status --ps --route --rpm-qi --rsync --rsync-s --semver --sfdisk --shadow --srt --ss --ssh-conf --sshd-conf --stat --stat-s --sysctl --syslog --syslog-s --syslog-bsd --syslog-bsd-s --systemctl --systemctl-lj --systemctl-ls --systemctl-luf --systeminfo --time --timedatectl --timestamp --toml --top --top-s --tracepath --traceroute --udevadm --ufw --ufw-appinfo 
--uname --update-alt-gs --update-alt-q --upower --uptime --url --ver --veracrypt --vmstat --vmstat-s --w --wc --who --x509-cert --x509-csr --xml --xrandr --yaml --zipinfo --zpool-iostat --zpool-status)
|
||||
jc_commands=(acpi airport arp blkid bluetoothctl cbt certbot chage cksum crontab date debconf-show df dig dmidecode dpkg du env file findmnt finger free git gpg hciconfig host id ifconfig iostat ip iptables iw iwconfig jobs last lastb ls lsattr lsb_release lsblk lsmod lsof lspci lsusb md5 md5sum mdadm mount mpstat netstat nmcli nsd-control ntpq os-prober pidstat ping ping6 pip pip3 postconf printenv ps route rpm rsync sfdisk sha1sum sha224sum sha256sum sha384sum sha512sum shasum ss ssh sshd stat sum swapon sysctl systemctl systeminfo timedatectl top tracepath tracepath6 traceroute traceroute6 tune2fs udevadm ufw uname update-alternatives upower uptime vdir veracrypt vmstat w wc who xrandr zipinfo zpool)
|
||||
jc_parsers=(--acpi --airport --airport-s --arp --asciitable --asciitable-m --blkid --bluetoothctl --cbt --cef --cef-s --certbot --chage --cksum --clf --clf-s --crontab --crontab-u --csv --csv-s --date --datetime-iso --debconf-show --df --dig --dir --dmidecode --dpkg-l --du --email-address --env --file --find --findmnt --finger --free --fstab --git-log --git-log-s --git-ls-remote --gpg --group --gshadow --hash --hashsum --hciconfig --history --host --hosts --id --ifconfig --ini --ini-dup --iostat --iostat-s --ip-address --iptables --ip-route --iw-scan --iwconfig --jar-manifest --jobs --jwt --kv --last --ls --ls-s --lsattr --lsb-release --lsblk --lsmod --lsof --lspci --lsusb --m3u --mdadm --mount --mpstat --mpstat-s --netstat --nmcli --nsd-control --ntpq --openvpn --os-prober --os-release --passwd --pci-ids --pgpass --pidstat --pidstat-s --ping --ping-s --pip-list --pip-show --pkg-index-apk --pkg-index-deb --plist --postconf --proc --proc-buddyinfo --proc-cmdline --proc-consoles --proc-cpuinfo --proc-crypto --proc-devices --proc-diskstats --proc-filesystems --proc-interrupts --proc-iomem --proc-ioports --proc-loadavg --proc-locks --proc-meminfo --proc-modules --proc-mtrr --proc-pagetypeinfo --proc-partitions --proc-slabinfo --proc-softirqs --proc-stat --proc-swaps --proc-uptime --proc-version --proc-vmallocinfo --proc-vmstat --proc-zoneinfo --proc-driver-rtc --proc-net-arp --proc-net-dev --proc-net-dev-mcast --proc-net-if-inet6 --proc-net-igmp --proc-net-igmp6 --proc-net-ipv6-route --proc-net-netlink --proc-net-netstat --proc-net-packet --proc-net-protocols --proc-net-route --proc-net-tcp --proc-net-unix --proc-pid-fdinfo --proc-pid-io --proc-pid-maps --proc-pid-mountinfo --proc-pid-numa-maps --proc-pid-smaps --proc-pid-stat --proc-pid-statm --proc-pid-status --ps --resolve-conf --route --rpm-qi --rsync --rsync-s --semver --sfdisk --shadow --srt --ss --ssh-conf --sshd-conf --stat --stat-s --swapon --sysctl --syslog --syslog-s --syslog-bsd --syslog-bsd-s --systemctl 
--systemctl-lj --systemctl-ls --systemctl-luf --systeminfo --time --timedatectl --timestamp --toml --top --top-s --tracepath --traceroute --tune2fs --udevadm --ufw --ufw-appinfo --uname --update-alt-gs --update-alt-q --upower --uptime --url --ver --veracrypt --vmstat --vmstat-s --w --wc --who --x509-cert --x509-csr --xml --xrandr --yaml --zipinfo --zpool-iostat --zpool-status)
|
||||
jc_options=(--force-color -C --debug -d --monochrome -m --meta-out -M --pretty -p --quiet -q --raw -r --unbuffer -u --yaml-out -y)
|
||||
jc_about_options=(--about -a)
|
||||
jc_about_mod_options=(--pretty -p --yaml-out -y --monochrome -m --force-color -C)
|
||||
|
||||
@@ -9,7 +9,7 @@ _jc() {
|
||||
jc_help_options jc_help_options_describe \
|
||||
jc_special_options jc_special_options_describe
|
||||
|
||||
jc_commands=(acpi airport arp blkid bluetoothctl cbt certbot chage cksum crontab date df dig dmidecode dpkg du env file findmnt finger free git gpg hciconfig id ifconfig iostat iptables iw iwconfig jobs last lastb ls lsattr lsblk lsmod lsof lspci lsusb md5 md5sum mdadm mount mpstat netstat nmcli ntpq os-prober pidstat ping ping6 pip pip3 postconf printenv ps route rpm rsync sfdisk sha1sum sha224sum sha256sum sha384sum sha512sum shasum ss ssh sshd stat sum sysctl systemctl systeminfo timedatectl top tracepath tracepath6 traceroute traceroute6 udevadm ufw uname update-alternatives upower uptime vdir veracrypt vmstat w wc who xrandr zipinfo zpool)
|
||||
jc_commands=(acpi airport arp blkid bluetoothctl cbt certbot chage cksum crontab date debconf-show df dig dmidecode dpkg du env file findmnt finger free git gpg hciconfig host id ifconfig iostat ip iptables iw iwconfig jobs last lastb ls lsattr lsb_release lsblk lsmod lsof lspci lsusb md5 md5sum mdadm mount mpstat netstat nmcli nsd-control ntpq os-prober pidstat ping ping6 pip pip3 postconf printenv ps route rpm rsync sfdisk sha1sum sha224sum sha256sum sha384sum sha512sum shasum ss ssh sshd stat sum swapon sysctl systemctl systeminfo timedatectl top tracepath tracepath6 traceroute traceroute6 tune2fs udevadm ufw uname update-alternatives upower uptime vdir veracrypt vmstat w wc who xrandr zipinfo zpool)
|
||||
jc_commands_describe=(
|
||||
'acpi:run "acpi" command with magic syntax.'
|
||||
'airport:run "airport" command with magic syntax.'
|
||||
@@ -22,6 +22,7 @@ _jc() {
|
||||
'cksum:run "cksum" command with magic syntax.'
|
||||
'crontab:run "crontab" command with magic syntax.'
|
||||
'date:run "date" command with magic syntax.'
|
||||
'debconf-show:run "debconf-show" command with magic syntax.'
|
||||
'df:run "df" command with magic syntax.'
|
||||
'dig:run "dig" command with magic syntax.'
|
||||
'dmidecode:run "dmidecode" command with magic syntax.'
|
||||
@@ -35,9 +36,11 @@ _jc() {
|
||||
'git:run "git" command with magic syntax.'
|
||||
'gpg:run "gpg" command with magic syntax.'
|
||||
'hciconfig:run "hciconfig" command with magic syntax.'
|
||||
'host:run "host" command with magic syntax.'
|
||||
'id:run "id" command with magic syntax.'
|
||||
'ifconfig:run "ifconfig" command with magic syntax.'
|
||||
'iostat:run "iostat" command with magic syntax.'
|
||||
'ip:run "ip" command with magic syntax.'
|
||||
'iptables:run "iptables" command with magic syntax.'
|
||||
'iw:run "iw" command with magic syntax.'
|
||||
'iwconfig:run "iwconfig" command with magic syntax.'
|
||||
@@ -46,6 +49,7 @@ _jc() {
|
||||
'lastb:run "lastb" command with magic syntax.'
|
||||
'ls:run "ls" command with magic syntax.'
|
||||
'lsattr:run "lsattr" command with magic syntax.'
|
||||
'lsb_release:run "lsb_release" command with magic syntax.'
|
||||
'lsblk:run "lsblk" command with magic syntax.'
|
||||
'lsmod:run "lsmod" command with magic syntax.'
|
||||
'lsof:run "lsof" command with magic syntax.'
|
||||
@@ -58,6 +62,7 @@ _jc() {
|
||||
'mpstat:run "mpstat" command with magic syntax.'
|
||||
'netstat:run "netstat" command with magic syntax.'
|
||||
'nmcli:run "nmcli" command with magic syntax.'
|
||||
'nsd-control:run "nsd-control" command with magic syntax.'
|
||||
'ntpq:run "ntpq" command with magic syntax.'
|
||||
'os-prober:run "os-prober" command with magic syntax.'
|
||||
'pidstat:run "pidstat" command with magic syntax.'
|
||||
@@ -83,6 +88,7 @@ _jc() {
|
||||
'sshd:run "sshd" command with magic syntax.'
|
||||
'stat:run "stat" command with magic syntax.'
|
||||
'sum:run "sum" command with magic syntax.'
|
||||
'swapon:run "swapon" command with magic syntax.'
|
||||
'sysctl:run "sysctl" command with magic syntax.'
|
||||
'systemctl:run "systemctl" command with magic syntax.'
|
||||
'systeminfo:run "systeminfo" command with magic syntax.'
|
||||
@@ -92,6 +98,7 @@ _jc() {
|
||||
'tracepath6:run "tracepath6" command with magic syntax.'
|
||||
'traceroute:run "traceroute" command with magic syntax.'
|
||||
'traceroute6:run "traceroute6" command with magic syntax.'
|
||||
'tune2fs:run "tune2fs" command with magic syntax.'
|
||||
'udevadm:run "udevadm" command with magic syntax.'
|
||||
'ufw:run "ufw" command with magic syntax.'
|
||||
'uname:run "uname" command with magic syntax.'
|
||||
@@ -108,7 +115,7 @@ _jc() {
|
||||
'zipinfo:run "zipinfo" command with magic syntax.'
|
||||
'zpool:run "zpool" command with magic syntax.'
|
||||
)
|
||||
jc_parsers=(--acpi --airport --airport-s --arp --asciitable --asciitable-m --blkid --bluetoothctl --cbt --cef --cef-s --certbot --chage --cksum --clf --clf-s --crontab --crontab-u --csv --csv-s --date --datetime-iso --df --dig --dir --dmidecode --dpkg-l --du --email-address --env --file --findmnt --finger --free --fstab --git-log --git-log-s --git-ls-remote --gpg --group --gshadow --hash --hashsum --hciconfig --history --hosts --id --ifconfig --ini --ini-dup --iostat --iostat-s --ip-address --iptables --iw-scan --iwconfig --jar-manifest --jobs --jwt --kv --last --ls --ls-s --lsattr --lsblk --lsmod --lsof --lspci --lsusb --m3u --mdadm --mount --mpstat --mpstat-s --netstat --nmcli --ntpq --openvpn --os-prober --passwd --pci-ids --pgpass --pidstat --pidstat-s --ping --ping-s --pip-list --pip-show --plist --postconf --proc --proc-buddyinfo --proc-consoles --proc-cpuinfo --proc-crypto --proc-devices --proc-diskstats --proc-filesystems --proc-interrupts --proc-iomem --proc-ioports --proc-loadavg --proc-locks --proc-meminfo --proc-modules --proc-mtrr --proc-pagetypeinfo --proc-partitions --proc-slabinfo --proc-softirqs --proc-stat --proc-swaps --proc-uptime --proc-version --proc-vmallocinfo --proc-vmstat --proc-zoneinfo --proc-driver-rtc --proc-net-arp --proc-net-dev --proc-net-dev-mcast --proc-net-if-inet6 --proc-net-igmp --proc-net-igmp6 --proc-net-ipv6-route --proc-net-netlink --proc-net-netstat --proc-net-packet --proc-net-protocols --proc-net-route --proc-net-unix --proc-pid-fdinfo --proc-pid-io --proc-pid-maps --proc-pid-mountinfo --proc-pid-numa-maps --proc-pid-smaps --proc-pid-stat --proc-pid-statm --proc-pid-status --ps --route --rpm-qi --rsync --rsync-s --semver --sfdisk --shadow --srt --ss --ssh-conf --sshd-conf --stat --stat-s --sysctl --syslog --syslog-s --syslog-bsd --syslog-bsd-s --systemctl --systemctl-lj --systemctl-ls --systemctl-luf --systeminfo --time --timedatectl --timestamp --toml --top --top-s --tracepath --traceroute --udevadm --ufw --ufw-appinfo 
--uname --update-alt-gs --update-alt-q --upower --uptime --url --ver --veracrypt --vmstat --vmstat-s --w --wc --who --x509-cert --x509-csr --xml --xrandr --yaml --zipinfo --zpool-iostat --zpool-status)
|
||||
jc_parsers=(--acpi --airport --airport-s --arp --asciitable --asciitable-m --blkid --bluetoothctl --cbt --cef --cef-s --certbot --chage --cksum --clf --clf-s --crontab --crontab-u --csv --csv-s --date --datetime-iso --debconf-show --df --dig --dir --dmidecode --dpkg-l --du --email-address --env --file --find --findmnt --finger --free --fstab --git-log --git-log-s --git-ls-remote --gpg --group --gshadow --hash --hashsum --hciconfig --history --host --hosts --id --ifconfig --ini --ini-dup --iostat --iostat-s --ip-address --iptables --ip-route --iw-scan --iwconfig --jar-manifest --jobs --jwt --kv --last --ls --ls-s --lsattr --lsb-release --lsblk --lsmod --lsof --lspci --lsusb --m3u --mdadm --mount --mpstat --mpstat-s --netstat --nmcli --nsd-control --ntpq --openvpn --os-prober --os-release --passwd --pci-ids --pgpass --pidstat --pidstat-s --ping --ping-s --pip-list --pip-show --pkg-index-apk --pkg-index-deb --plist --postconf --proc --proc-buddyinfo --proc-cmdline --proc-consoles --proc-cpuinfo --proc-crypto --proc-devices --proc-diskstats --proc-filesystems --proc-interrupts --proc-iomem --proc-ioports --proc-loadavg --proc-locks --proc-meminfo --proc-modules --proc-mtrr --proc-pagetypeinfo --proc-partitions --proc-slabinfo --proc-softirqs --proc-stat --proc-swaps --proc-uptime --proc-version --proc-vmallocinfo --proc-vmstat --proc-zoneinfo --proc-driver-rtc --proc-net-arp --proc-net-dev --proc-net-dev-mcast --proc-net-if-inet6 --proc-net-igmp --proc-net-igmp6 --proc-net-ipv6-route --proc-net-netlink --proc-net-netstat --proc-net-packet --proc-net-protocols --proc-net-route --proc-net-tcp --proc-net-unix --proc-pid-fdinfo --proc-pid-io --proc-pid-maps --proc-pid-mountinfo --proc-pid-numa-maps --proc-pid-smaps --proc-pid-stat --proc-pid-statm --proc-pid-status --ps --resolve-conf --route --rpm-qi --rsync --rsync-s --semver --sfdisk --shadow --srt --ss --ssh-conf --sshd-conf --stat --stat-s --swapon --sysctl --syslog --syslog-s --syslog-bsd --syslog-bsd-s --systemctl 
--systemctl-lj --systemctl-ls --systemctl-luf --systeminfo --time --timedatectl --timestamp --toml --top --top-s --tracepath --traceroute --tune2fs --udevadm --ufw --ufw-appinfo --uname --update-alt-gs --update-alt-q --upower --uptime --url --ver --veracrypt --vmstat --vmstat-s --w --wc --who --x509-cert --x509-csr --xml --xrandr --yaml --zipinfo --zpool-iostat --zpool-status)
|
||||
jc_parsers_describe=(
|
||||
'--acpi:`acpi` command parser'
|
||||
'--airport:`airport -I` command parser'
|
||||
@@ -132,6 +139,7 @@ _jc() {
|
||||
'--csv-s:CSV file streaming parser'
|
||||
'--date:`date` command parser'
|
||||
'--datetime-iso:ISO 8601 Datetime string parser'
|
||||
'--debconf-show:`debconf-show` command parser'
|
||||
'--df:`df` command parser'
|
||||
'--dig:`dig` command parser'
|
||||
'--dir:`dir` command parser'
|
||||
@@ -141,6 +149,7 @@ _jc() {
|
||||
'--email-address:Email Address string parser'
|
||||
'--env:`env` command parser'
|
||||
'--file:`file` command parser'
|
||||
'--find:`find` command parser'
|
||||
'--findmnt:`findmnt` command parser'
|
||||
'--finger:`finger` command parser'
|
||||
'--free:`free` command parser'
|
||||
@@ -155,6 +164,7 @@ _jc() {
|
||||
'--hashsum:hashsum command parser (`md5sum`, `shasum`, etc.)'
|
||||
'--hciconfig:`hciconfig` command parser'
|
||||
'--history:`history` command parser'
|
||||
'--host:`host` command parser'
|
||||
'--hosts:`/etc/hosts` file parser'
|
||||
'--id:`id` command parser'
|
||||
'--ifconfig:`ifconfig` command parser'
|
||||
@@ -164,6 +174,7 @@ _jc() {
|
||||
'--iostat-s:`iostat` command streaming parser'
|
||||
'--ip-address:IPv4 and IPv6 Address string parser'
|
||||
'--iptables:`iptables` command parser'
|
||||
'--ip-route:`ip route` command parser'
|
||||
'--iw-scan:`iw dev [device] scan` command parser'
|
||||
'--iwconfig:`iwconfig` command parser'
|
||||
'--jar-manifest:Java MANIFEST.MF file parser'
|
||||
@@ -174,6 +185,7 @@ _jc() {
|
||||
'--ls:`ls` command parser'
|
||||
'--ls-s:`ls` command streaming parser'
|
||||
'--lsattr:`lsattr` command parser'
|
||||
'--lsb-release:`lsb_release` command parser'
|
||||
'--lsblk:`lsblk` command parser'
|
||||
'--lsmod:`lsmod` command parser'
|
||||
'--lsof:`lsof` command parser'
|
||||
@@ -186,9 +198,11 @@ _jc() {
|
||||
'--mpstat-s:`mpstat` command streaming parser'
|
||||
'--netstat:`netstat` command parser'
|
||||
'--nmcli:`nmcli` command parser'
|
||||
'--nsd-control:`nsd-control` command parser'
|
||||
'--ntpq:`ntpq -p` command parser'
|
||||
'--openvpn:openvpn-status.log file parser'
|
||||
'--os-prober:`os-prober` command parser'
|
||||
'--os-release:`/etc/os-release` file parser'
|
||||
'--passwd:`/etc/passwd` file parser'
|
||||
'--pci-ids:`pci.ids` file parser'
|
||||
'--pgpass:PostgreSQL password file parser'
|
||||
@@ -198,10 +212,13 @@ _jc() {
|
||||
'--ping-s:`ping` and `ping6` command streaming parser'
|
||||
'--pip-list:`pip list` command parser'
|
||||
'--pip-show:`pip show` command parser'
|
||||
'--pkg-index-apk:Alpine Linux Package Index file parser'
|
||||
'--pkg-index-deb:Debian Package Index file parser'
|
||||
'--plist:PLIST file parser'
|
||||
'--postconf:`postconf -M` command parser'
|
||||
'--proc:`/proc/` file parser'
|
||||
'--proc-buddyinfo:`/proc/buddyinfo` file parser'
|
||||
'--proc-cmdline:`/proc/cmdline` file parser'
|
||||
'--proc-consoles:`/proc/consoles` file parser'
|
||||
'--proc-cpuinfo:`/proc/cpuinfo` file parser'
|
||||
'--proc-crypto:`/proc/crypto` file parser'
|
||||
@@ -240,6 +257,7 @@ _jc() {
|
||||
'--proc-net-packet:`/proc/net/packet` file parser'
|
||||
'--proc-net-protocols:`/proc/net/protocols` file parser'
|
||||
'--proc-net-route:`/proc/net/route` file parser'
|
||||
'--proc-net-tcp:`/proc/net/tcp` and `/proc/net/tcp6` file parser'
|
||||
'--proc-net-unix:`/proc/net/unix` file parser'
|
||||
'--proc-pid-fdinfo:`/proc/<pid>/fdinfo/<fd>` file parser'
|
||||
'--proc-pid-io:`/proc/<pid>/io` file parser'
|
||||
@@ -251,6 +269,7 @@ _jc() {
|
||||
'--proc-pid-statm:`/proc/<pid>/statm` file parser'
|
||||
'--proc-pid-status:`/proc/<pid>/status` file parser'
|
||||
'--ps:`ps` command parser'
|
||||
'--resolve-conf:`/etc/resolve.conf` file parser'
|
||||
'--route:`route` command parser'
|
||||
'--rpm-qi:`rpm -qi` command parser'
|
||||
'--rsync:`rsync` command parser'
|
||||
@@ -264,6 +283,7 @@ _jc() {
|
||||
'--sshd-conf:`sshd` config file and `sshd -T` command parser'
|
||||
'--stat:`stat` command parser'
|
||||
'--stat-s:`stat` command streaming parser'
|
||||
'--swapon:`swapon` command parser'
|
||||
'--sysctl:`sysctl` command parser'
|
||||
'--syslog:Syslog RFC 5424 string parser'
|
||||
'--syslog-s:Syslog RFC 5424 string streaming parser'
|
||||
@@ -282,6 +302,7 @@ _jc() {
|
||||
'--top-s:`top -b` command streaming parser'
|
||||
'--tracepath:`tracepath` and `tracepath6` command parser'
|
||||
'--traceroute:`traceroute` and `traceroute6` command parser'
|
||||
'--tune2fs:`tune2fs -l` command parser'
|
||||
'--udevadm:`udevadm info` command parser'
|
||||
'--ufw:`ufw status` command parser'
|
||||
'--ufw-appinfo:`ufw app info [application]` command parser'
|
||||
|
||||
@@ -250,4 +250,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.6 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.7 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
@@ -158,4 +158,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.1 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.2 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
105
docs/parsers/debconf_show.md
Normal file
105
docs/parsers/debconf_show.md
Normal file
@@ -0,0 +1,105 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.debconf_show"></a>
|
||||
|
||||
# jc.parsers.debconf\_show
|
||||
|
||||
jc - JSON Convert `debconf-show` command output parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ debconf-show onlyoffice-documentserver | jc --debconf-show
|
||||
|
||||
or
|
||||
|
||||
$ jc debconf-show onlyoffice-documentserver
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('debconf_show', debconf_show_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"asked": boolean,
|
||||
"packagename": string,
|
||||
"name": string,
|
||||
"value": string
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ debconf-show onlyoffice-documentserver | jc --debconf-show -p
|
||||
[
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "jwt_secret",
|
||||
"value": "aL8ei2iereuzee7cuJ6Cahjah1ixee2ah"
|
||||
},
|
||||
{
|
||||
"asked": false,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "db_pwd",
|
||||
"value": "(password omitted)"
|
||||
},
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "rabbitmq_pwd",
|
||||
"value": "(password omitted)"
|
||||
},
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "db_port",
|
||||
"value": "5432"
|
||||
},
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "db_user",
|
||||
"value": "onlyoffice"
|
||||
},
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "rabbitmq_proto",
|
||||
"value": "amqp"
|
||||
},
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "cluster_mode",
|
||||
"value": "false"
|
||||
}
|
||||
]
|
||||
|
||||
<a id="jc.parsers.debconf_show.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str,
|
||||
raw: bool = False,
|
||||
quiet: bool = False) -> List[JSONDictType]
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.0 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
@@ -90,10 +90,10 @@ Parameters:
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary of raw structured data or
|
||||
List of Dictionaries of processed structured data
|
||||
Dictionary of raw structured data or (default)
|
||||
List of Dictionaries of processed structured data (raw)
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.4 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.5 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
82
docs/parsers/find.md
Normal file
82
docs/parsers/find.md
Normal file
@@ -0,0 +1,82 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.find"></a>
|
||||
|
||||
# jc.parsers.find
|
||||
|
||||
jc - JSON Convert `find` command output parser
|
||||
|
||||
This parser returns a list of objects by default and a list of strings if
|
||||
the `--raw` option is used.
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ find | jc --find
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('find', find_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"path": string,
|
||||
"node": string,
|
||||
"error": string
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ find | jc --find -p
|
||||
[
|
||||
{
|
||||
"path": "./directory"
|
||||
"node": "filename"
|
||||
},
|
||||
{
|
||||
"path": "./anotherdirectory"
|
||||
"node": "anotherfile"
|
||||
},
|
||||
{
|
||||
"path": null
|
||||
"node": null
|
||||
"error": "find: './inaccessible': Permission denied"
|
||||
}
|
||||
...
|
||||
]
|
||||
|
||||
$ find | jc --find -p -r
|
||||
[
|
||||
"./templates/readme_template",
|
||||
"./templates/manpage_template",
|
||||
"./.github/workflows/pythonapp.yml",
|
||||
...
|
||||
]
|
||||
|
||||
<a id="jc.parsers.find.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of raw strings or
|
||||
List of Dictionaries of processed structured data
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.0 by Solomon Leang (solomonleang@gmail.com)
|
||||
113
docs/parsers/host.md
Normal file
113
docs/parsers/host.md
Normal file
@@ -0,0 +1,113 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.host"></a>
|
||||
|
||||
# jc.parsers.host
|
||||
|
||||
jc - JSON Convert `host` command output parser
|
||||
|
||||
Supports parsing of the most commonly used RR types (A, AAAA, MX, TXT)
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ host google.com | jc --host
|
||||
|
||||
or
|
||||
|
||||
$ jc host google.com
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('host', host_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"hostname": string,
|
||||
"address": [
|
||||
string
|
||||
],
|
||||
"v6-address": [
|
||||
string
|
||||
],
|
||||
"mail": [
|
||||
string
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
[
|
||||
{
|
||||
"nameserver": string,
|
||||
"zone": string,
|
||||
"mname": string,
|
||||
"rname": string,
|
||||
"serial": integer,
|
||||
"refresh": integer,
|
||||
"retry": integer,
|
||||
"expire": integer,
|
||||
"minimum": integer
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ host google.com | jc --host
|
||||
[
|
||||
{
|
||||
"hostname": "google.com",
|
||||
"address": [
|
||||
"142.251.39.110"
|
||||
],
|
||||
"v6-address": [
|
||||
"2a00:1450:400e:811::200e"
|
||||
],
|
||||
"mail": [
|
||||
"smtp.google.com."
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
$ jc host -C sunet.se
|
||||
[
|
||||
{
|
||||
"nameserver": "2001:6b0:7::2",
|
||||
"zone": "sunet.se",
|
||||
"mname": "sunic.sunet.se.",
|
||||
"rname": "hostmaster.sunet.se.",
|
||||
"serial": "2023090401",
|
||||
"refresh": "28800",
|
||||
"retry": "7200",
|
||||
"expire": "604800",
|
||||
"minimum": "300"
|
||||
},
|
||||
{
|
||||
...
|
||||
}
|
||||
]
|
||||
|
||||
<a id="jc.parsers.host.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.0 by Pettai (pettai@sunet.se)
|
||||
@@ -22,12 +22,12 @@ contained in lists/arrays.
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat foo.ini | jc --ini
|
||||
$ cat foo.ini | jc --ini-dup
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('ini', ini_file_output)
|
||||
result = jc.parse('ini_dup', ini_file_output)
|
||||
|
||||
Schema:
|
||||
|
||||
@@ -67,7 +67,7 @@ Examples:
|
||||
fruit = peach
|
||||
color = green
|
||||
|
||||
$ cat example.ini | jc --ini -p
|
||||
$ cat example.ini | jc --ini-dup -p
|
||||
{
|
||||
"foo": [
|
||||
"fiz"
|
||||
@@ -118,4 +118,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.0 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.1 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
74
docs/parsers/ip_route.md
Normal file
74
docs/parsers/ip_route.md
Normal file
@@ -0,0 +1,74 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.ip_route"></a>
|
||||
|
||||
# jc.parsers.ip\_route
|
||||
|
||||
jc - JSON Convert `ip route` command output parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ ip route | jc --ip-route
|
||||
|
||||
or
|
||||
|
||||
$ jc ip-route
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('ip_route', ip_route_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"ip": string,
|
||||
"via": string,
|
||||
"dev": string,
|
||||
"metric": integer,
|
||||
"proto": string,
|
||||
"scope": string,
|
||||
"src": string,
|
||||
"via": string,
|
||||
"status": string
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ ip route | jc --ip-route -p
|
||||
[
|
||||
{
|
||||
"ip": "10.0.2.0/24",
|
||||
"dev": "enp0s3",
|
||||
"proto": "kernel",
|
||||
"scope": "link",
|
||||
"src": "10.0.2.15",
|
||||
"metric": 100
|
||||
}
|
||||
]
|
||||
|
||||
<a id="jc.parsers.ip_route.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of Json objects if data is processed and Raw data if raw = true.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.0 by Julian Jackson (jackson.julian55@yahoo.com)
|
||||
@@ -30,7 +30,7 @@ Schema:
|
||||
"num" integer,
|
||||
"pkts": integer,
|
||||
"bytes": integer, # converted based on suffix
|
||||
"target": string,
|
||||
"target": string, # Null if blank
|
||||
"prot": string,
|
||||
"opt": string, # "--" = Null
|
||||
"in": string,
|
||||
@@ -186,4 +186,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.8 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.9 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
@@ -1,37 +0,0 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.iso_datetime"></a>
|
||||
|
||||
# jc.parsers.iso\_datetime
|
||||
|
||||
jc - JSON Convert ISO 8601 Datetime string parser
|
||||
|
||||
This parser has been renamed to datetime-iso (cli) or datetime_iso (module).
|
||||
|
||||
This parser will be removed in a future version, so please start using
|
||||
the new parser name.
|
||||
|
||||
<a id="jc.parsers.iso_datetime.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data, raw=False, quiet=False)
|
||||
```
|
||||
|
||||
This parser is deprecated and calls datetime_iso. Please use datetime_iso
|
||||
directly. This parser will be removed in the future.
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux, aix, freebsd, darwin, win32, cygwin
|
||||
|
||||
Version 1.1 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
61
docs/parsers/lsb_release.md
Normal file
61
docs/parsers/lsb_release.md
Normal file
@@ -0,0 +1,61 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.lsb_release"></a>
|
||||
|
||||
# jc.parsers.lsb\_release
|
||||
|
||||
jc - JSON Convert `lsb_release` command parser
|
||||
|
||||
This parser is an alias to the Key/Value parser (`--kv`).
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ lsb_release -a | jc --lsb-release
|
||||
|
||||
or
|
||||
$ jc lsb_release -a
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('lsb_release', lsb_release_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
{
|
||||
"<key>": string
|
||||
}
|
||||
|
||||
Examples:
|
||||
|
||||
$ lsb_release -a | jc --lsb-release -p
|
||||
{
|
||||
"Distributor ID": "Ubuntu",
|
||||
"Description": "Ubuntu 16.04.6 LTS",
|
||||
"Release": "16.04",
|
||||
"Codename": "xenial"
|
||||
}
|
||||
|
||||
<a id="jc.parsers.lsb_release.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> JSONDictType
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.0 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
@@ -98,4 +98,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, freebsd, aix
|
||||
|
||||
Version 1.8 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.9 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
@@ -378,4 +378,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, freebsd, win32
|
||||
|
||||
Version 1.14 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.15 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
90
docs/parsers/nsd_control.md
Normal file
90
docs/parsers/nsd_control.md
Normal file
@@ -0,0 +1,90 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.nsd_control"></a>
|
||||
|
||||
# jc.parsers.nsd\_control
|
||||
|
||||
jc - JSON Convert `nsd-control` command output parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ nsd-control | jc --nsd-control
|
||||
|
||||
or
|
||||
|
||||
$ jc nsd-control
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('nsd_control', nsd_control_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"version": string,
|
||||
"verbosity": integer,
|
||||
"ratelimit": integer
|
||||
}
|
||||
]
|
||||
|
||||
[
|
||||
{
|
||||
"zone": string
|
||||
"status": {
|
||||
"state": string,
|
||||
"served-serial": string,
|
||||
"commit-serial": string,
|
||||
"wait": string
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ nsd-control | jc --nsd-control status
|
||||
[
|
||||
{
|
||||
"version": "4.6.2",
|
||||
"verbosity": "2",
|
||||
"ratelimit": "0"
|
||||
}
|
||||
]
|
||||
|
||||
$ nsd-control | jc --nsd-control zonestatus sunet.se
|
||||
[
|
||||
{
|
||||
"zone": "sunet.se",
|
||||
"status": {
|
||||
"state": "ok",
|
||||
"served-serial": "2023090704 since 2023-09-07T16:34:27",
|
||||
"commit-serial": "2023090704 since 2023-09-07T16:34:27",
|
||||
"wait": "28684 sec between attempts"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
<a id="jc.parsers.nsd_control.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False)
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.0 by Pettai (pettai@sunet.se)
|
||||
86
docs/parsers/os_release.md
Normal file
86
docs/parsers/os_release.md
Normal file
@@ -0,0 +1,86 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.os_release"></a>
|
||||
|
||||
# jc.parsers.os\_release
|
||||
|
||||
jc - JSON Convert `/etc/os-release` file parser
|
||||
|
||||
This parser is an alias to the Key/Value parser (`--kv`).
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat /etc/os-release | jc --os-release
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('os_release', os_release_output)
|
||||
|
||||
Schema:
|
||||
|
||||
{
|
||||
"<key>": string
|
||||
}
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /etc/os-release | jc --os-release -p
|
||||
{
|
||||
"NAME": "CentOS Linux",
|
||||
"VERSION": "7 (Core)",
|
||||
"ID": "centos",
|
||||
"ID_LIKE": "rhel fedora",
|
||||
"VERSION_ID": "7",
|
||||
"PRETTY_NAME": "CentOS Linux 7 (Core)",
|
||||
"ANSI_COLOR": "0;31",
|
||||
"CPE_NAME": "cpe:/o:centos:centos:7",
|
||||
"HOME_URL": "https://www.centos.org/",
|
||||
"BUG_REPORT_URL": "https://bugs.centos.org/",
|
||||
"CENTOS_MANTISBT_PROJECT": "CentOS-7",
|
||||
"CENTOS_MANTISBT_PROJECT_VERSION": "7",
|
||||
"REDHAT_SUPPORT_PRODUCT": "centos",
|
||||
"REDHAT_SUPPORT_PRODUCT_VERSION": "7"
|
||||
}
|
||||
|
||||
$ cat /etc/os-release | jc --os-release -p -r
|
||||
{
|
||||
"NAME": "\\"CentOS Linux\\"",
|
||||
"VERSION": "\\"7 (Core)\\"",
|
||||
"ID": "\\"centos\\"",
|
||||
"ID_LIKE": "\\"rhel fedora\\"",
|
||||
"VERSION_ID": "\\"7\\"",
|
||||
"PRETTY_NAME": "\\"CentOS Linux 7 (Core)\\"",
|
||||
"ANSI_COLOR": "\\"0;31\\"",
|
||||
"CPE_NAME": "\\"cpe:/o:centos:centos:7\\"",
|
||||
"HOME_URL": "\\"https://www.centos.org/\\"",
|
||||
"BUG_REPORT_URL": "\\"https://bugs.centos.org/\\"",
|
||||
"CENTOS_MANTISBT_PROJECT": "\\"CentOS-7\\"",
|
||||
"CENTOS_MANTISBT_PROJECT_VERSION": "\\"7\\"",
|
||||
"REDHAT_SUPPORT_PRODUCT": "\\"centos\\"",
|
||||
"REDHAT_SUPPORT_PRODUCT_VERSION": "\\"7\\""
|
||||
}
|
||||
|
||||
<a id="jc.parsers.os_release.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> JSONDictType
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.0 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
@@ -45,6 +45,9 @@ Schema:
|
||||
"kb_ccwr_s": float,
|
||||
"cswch_s": float,
|
||||
"nvcswch_s": float,
|
||||
"usr_ms": integer,
|
||||
"system_ms": integer,
|
||||
"guest_ms": integer,
|
||||
"command": string
|
||||
}
|
||||
]
|
||||
@@ -148,4 +151,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.1 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.3 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
@@ -39,6 +39,7 @@ Schema:
|
||||
"percent_usr": float,
|
||||
"percent_system": float,
|
||||
"percent_guest": float,
|
||||
"percent_wait": float,
|
||||
"percent_cpu": float,
|
||||
"cpu": integer,
|
||||
"minflt_s": float,
|
||||
@@ -53,6 +54,9 @@ Schema:
|
||||
"kb_ccwr_s": float,
|
||||
"cswch_s": float,
|
||||
"nvcswch_s": float,
|
||||
"usr_ms": integer,
|
||||
"system_ms": integer,
|
||||
"guest_ms": integer,
|
||||
"command": string,
|
||||
|
||||
# below object only exists if using -qq or ignore_exceptions=True
|
||||
@@ -107,4 +111,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.1 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.2 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
@@ -35,6 +35,8 @@ Schema:
|
||||
"packets_received": integer,
|
||||
"packet_loss_percent": float,
|
||||
"duplicates": integer,
|
||||
"errors": integer,
|
||||
"corrupted": integer,
|
||||
"round_trip_ms_min": float,
|
||||
"round_trip_ms_avg": float,
|
||||
"round_trip_ms_max": float,
|
||||
@@ -185,4 +187,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, freebsd
|
||||
|
||||
Version 1.9 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.10 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
@@ -36,7 +36,7 @@ Schema:
|
||||
"source_ip": string,
|
||||
"destination_ip": string,
|
||||
"sent_bytes": integer,
|
||||
"pattern": string, # (null if not set)
|
||||
"pattern": string, # null if not set
|
||||
"destination": string,
|
||||
"timestamp": float,
|
||||
"response_bytes": integer,
|
||||
@@ -49,10 +49,12 @@ Schema:
|
||||
"packets_received": integer,
|
||||
"packet_loss_percent": float,
|
||||
"duplicates": integer,
|
||||
"round_trip_ms_min": float,
|
||||
"round_trip_ms_avg": float,
|
||||
"round_trip_ms_max": float,
|
||||
"round_trip_ms_stddev": float,
|
||||
"errors": integer, # null if not set
|
||||
"corrupted": integer, # null if not set
|
||||
"round_trip_ms_min": float, # null if not set
|
||||
"round_trip_ms_avg": float, # null if not set
|
||||
"round_trip_ms_max": float, # null if not set
|
||||
"round_trip_ms_stddev": float, # null if not set
|
||||
|
||||
# below object only exists if using -qq or ignore_exceptions=True
|
||||
"_jc_meta": {
|
||||
@@ -106,4 +108,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, freebsd
|
||||
|
||||
Version 1.3 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.4 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
126
docs/parsers/pkg_index_apk.md
Normal file
126
docs/parsers/pkg_index_apk.md
Normal file
@@ -0,0 +1,126 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.pkg_index_apk"></a>
|
||||
|
||||
# jc.parsers.pkg\_index\_apk
|
||||
|
||||
jc - JSON Convert Alpine Linux Package Index files
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat APKINDEX | jc --pkg-index-apk
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('pkg_index_apk', pkg_index_apk_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"checksum": string,
|
||||
"package": string,
|
||||
"version": string,
|
||||
"architecture": string,
|
||||
"package_size": integer,
|
||||
"installed_size": integer,
|
||||
"description": string,
|
||||
"url": string,
|
||||
"license": string,
|
||||
"origin": string,
|
||||
"maintainer": {
|
||||
"name": string,
|
||||
"email": string,
|
||||
},
|
||||
"build_time": integer,
|
||||
"commit": string,
|
||||
"provider_priority": string,
|
||||
"dependencies": [
|
||||
string
|
||||
],
|
||||
"provides": [
|
||||
string
|
||||
],
|
||||
"install_if": [
|
||||
string
|
||||
],
|
||||
}
|
||||
]
|
||||
|
||||
Example:
|
||||
|
||||
$ cat APKINDEX | jc --pkg-index-apk
|
||||
[
|
||||
{
|
||||
"checksum": "Q1znBl9k+RKgY6gl5Eg3iz73KZbLY=",
|
||||
"package": "yasm",
|
||||
"version": "1.3.0-r4",
|
||||
"architecture": "x86_64",
|
||||
"package_size": 772109,
|
||||
"installed_size": 1753088,
|
||||
"description": "A rewrite of NASM to allow for multiple synta...",
|
||||
"url": "http://www.tortall.net/projects/yasm/",
|
||||
"license": "BSD-2-Clause",
|
||||
"origin": "yasm",
|
||||
"maintainer": {
|
||||
"name": "Natanael Copa",
|
||||
"email": "ncopa@alpinelinux.org"
|
||||
},
|
||||
"build_time": 1681228881,
|
||||
"commit": "84a227baf001b6e0208e3352b294e4d7a40e93de",
|
||||
"dependencies": [
|
||||
"so:libc.musl-x86_64.so.1"
|
||||
],
|
||||
"provides": [
|
||||
"cmd:vsyasm=1.3.0-r4",
|
||||
"cmd:yasm=1.3.0-r4",
|
||||
"cmd:ytasm=1.3.0-r4"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
$ cat APKINDEX | jc --pkg-index-apk --raw
|
||||
[
|
||||
{
|
||||
"C": "Q1znBl9k+RKgY6gl5Eg3iz73KZbLY=",
|
||||
"P": "yasm",
|
||||
"V": "1.3.0-r4",
|
||||
"A": "x86_64",
|
||||
"S": "772109",
|
||||
"I": "1753088",
|
||||
"T": "A rewrite of NASM to allow for multiple syntax supported...",
|
||||
"U": "http://www.tortall.net/projects/yasm/",
|
||||
"L": "BSD-2-Clause",
|
||||
"o": "yasm",
|
||||
"m": "Natanael Copa <ncopa@alpinelinux.org>",
|
||||
"t": "1681228881",
|
||||
"c": "84a227baf001b6e0208e3352b294e4d7a40e93de",
|
||||
"D": "so:libc.musl-x86_64.so.1",
|
||||
"p": "cmd:vsyasm=1.3.0-r4 cmd:yasm=1.3.0-r4 cmd:ytasm=1.3.0-r4"
|
||||
},
|
||||
]
|
||||
|
||||
<a id="jc.parsers.pkg_index_apk.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> List[Dict]
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.0 by Roey Darwish Dror (roey.ghost@gmail.com)
|
||||
138
docs/parsers/pkg_index_deb.md
Normal file
138
docs/parsers/pkg_index_deb.md
Normal file
@@ -0,0 +1,138 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.pkg_index_deb"></a>
|
||||
|
||||
# jc.parsers.pkg\_index\_deb
|
||||
|
||||
jc - JSON Convert Debian Package Index file parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat Packages | jc --pkg-index-deb
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('pkg_index_deb', pkg_index_deb_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"package": string,
|
||||
"version": string,
|
||||
"architecture": string,
|
||||
"section": string,
|
||||
"priority": string,
|
||||
"installed_size": integer,
|
||||
"maintainer": string,
|
||||
"description": string,
|
||||
"homepage": string,
|
||||
"depends": string,
|
||||
"conflicts": string,
|
||||
"replaces": string,
|
||||
"vcs_git": string,
|
||||
"sha256": string,
|
||||
"size": integer,
|
||||
"vcs_git": string,
|
||||
"filename": string
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat Packages | jc --pkg-index-deb
|
||||
[
|
||||
{
|
||||
"package": "aspnetcore-runtime-2.1",
|
||||
"version": "2.1.22-1",
|
||||
"architecture": "amd64",
|
||||
"section": "devel",
|
||||
"priority": "standard",
|
||||
"installed_size": 71081,
|
||||
"maintainer": "Microsoft <nugetaspnet@microsoft.com>",
|
||||
"description": "Microsoft ASP.NET Core 2.1.22 Shared Framework",
|
||||
"homepage": "https://www.asp.net/",
|
||||
"depends": "libc6 (>= 2.14), dotnet-runtime-2.1 (>= 2.1.22)",
|
||||
"sha256": "48d4e78a7ceff34105411172f4c3e91a0359b3929d84d26a493...",
|
||||
"size": 21937036,
|
||||
"filename": "pool/main/a/aspnetcore-runtime-2.1/aspnetcore-run..."
|
||||
},
|
||||
{
|
||||
"package": "azure-functions-core-tools-4",
|
||||
"version": "4.0.4590-1",
|
||||
"architecture": "amd64",
|
||||
"section": "devel",
|
||||
"priority": "optional",
|
||||
"maintainer": "Ahmed ElSayed <ahmels@microsoft.com>",
|
||||
"description": "Azure Function Core Tools v4",
|
||||
"homepage": "https://docs.microsoft.com/en-us/azure/azure-func...",
|
||||
"conflicts": "azure-functions-core-tools-2, azure-functions-co...",
|
||||
"replaces": "azure-functions-core-tools-2, azure-functions-cor...",
|
||||
"vcs_git": "https://github.com/Azure/azure-functions-core-tool...",
|
||||
"sha256": "a2a4f99d6d98ba0a46832570285552f2a93bab06cebbda2afc7...",
|
||||
"size": 124417844,
|
||||
"filename": "pool/main/a/azure-functions-core-tools-4/azure-fu..."
|
||||
}
|
||||
]
|
||||
|
||||
$ cat Packages | jc --pkg-index-deb -r
|
||||
[
|
||||
{
|
||||
"package": "aspnetcore-runtime-2.1",
|
||||
"version": "2.1.22-1",
|
||||
"architecture": "amd64",
|
||||
"section": "devel",
|
||||
"priority": "standard",
|
||||
"installed_size": "71081",
|
||||
"maintainer": "Microsoft <nugetaspnet@microsoft.com>",
|
||||
"description": "Microsoft ASP.NET Core 2.1.22 Shared Framework",
|
||||
"homepage": "https://www.asp.net/",
|
||||
"depends": "libc6 (>= 2.14), dotnet-runtime-2.1 (>= 2.1.22)",
|
||||
"sha256": "48d4e78a7ceff34105411172f4c3e91a0359b3929d84d26a493...",
|
||||
"size": "21937036",
|
||||
"filename": "pool/main/a/aspnetcore-runtime-2.1/aspnetcore-run..."
|
||||
},
|
||||
{
|
||||
"package": "azure-functions-core-tools-4",
|
||||
"version": "4.0.4590-1",
|
||||
"architecture": "amd64",
|
||||
"section": "devel",
|
||||
"priority": "optional",
|
||||
"maintainer": "Ahmed ElSayed <ahmels@microsoft.com>",
|
||||
"description": "Azure Function Core Tools v4",
|
||||
"homepage": "https://docs.microsoft.com/en-us/azure/azure-func...",
|
||||
"conflicts": "azure-functions-core-tools-2, azure-functions-co...",
|
||||
"replaces": "azure-functions-core-tools-2, azure-functions-cor...",
|
||||
"vcs_git": "https://github.com/Azure/azure-functions-core-tool...",
|
||||
"sha256": "a2a4f99d6d98ba0a46832570285552f2a93bab06cebbda2afc7...",
|
||||
"size": "124417844",
|
||||
"filename": "pool/main/a/azure-functions-core-tools-4/azure-fu..."
|
||||
}
|
||||
]
|
||||
|
||||
<a id="jc.parsers.pkg_index_deb.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str,
|
||||
raw: bool = False,
|
||||
quiet: bool = False) -> List[JSONDictType]
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.0 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
@@ -139,4 +139,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.1 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.2 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
92
docs/parsers/proc_cmdline.md
Normal file
92
docs/parsers/proc_cmdline.md
Normal file
@@ -0,0 +1,92 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.proc_cmdline"></a>
|
||||
|
||||
# jc.parsers.proc\_cmdline
|
||||
|
||||
jc - JSON Convert `/proc/cmdline` file parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat /proc/cmdline | jc --proc
|
||||
|
||||
or
|
||||
|
||||
$ jc /proc/cmdline
|
||||
|
||||
or
|
||||
|
||||
$ cat /proc/cmdline | jc --proc-cmdline
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('proc_cmdline', proc_cmdline_file)
|
||||
|
||||
Schema:
|
||||
|
||||
{
|
||||
"<key>": string,
|
||||
"_options": [
|
||||
string
|
||||
]
|
||||
}
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /proc/cmdline | jc --proc -p
|
||||
{
|
||||
"BOOT_IMAGE": "clonezilla/live/vmlinuz",
|
||||
"consoleblank": "0",
|
||||
"keyboard-options": "grp:ctrl_shift_toggle,lctrl_shift_toggle",
|
||||
"ethdevice-timeout": "130",
|
||||
"toram": "filesystem.squashfs",
|
||||
"boot": "live",
|
||||
"edd": "on",
|
||||
"ocs_daemonon": "ssh lighttpd",
|
||||
"ocs_live_run": "sudo screen /usr/sbin/ocs-sr -g auto -e1 auto -e2 -batch -r -j2 -k -scr -p true restoreparts win7-64 sda1",
|
||||
"ocs_live_extra_param": "",
|
||||
"keyboard-layouts": "us,ru",
|
||||
"ocs_live_batch": "no",
|
||||
"locales": "ru_RU.UTF-8",
|
||||
"vga": "788",
|
||||
"net.ifnames": "0",
|
||||
"union": "overlay",
|
||||
"fetch": "http://10.1.1.1/tftpboot/clonezilla/live/filesystem.squashfs",
|
||||
"ocs_postrun99": "sudo reboot",
|
||||
"initrd": "clonezilla/live/initrd.img",
|
||||
"_options": [
|
||||
"config",
|
||||
"noswap",
|
||||
"nolocales",
|
||||
"nomodeset",
|
||||
"noprompt",
|
||||
"nosplash",
|
||||
"nodmraid",
|
||||
"components"
|
||||
]
|
||||
}
|
||||
|
||||
<a id="jc.parsers.proc_cmdline.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> JSONDictType
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.0 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
186
docs/parsers/proc_net_tcp.md
Normal file
186
docs/parsers/proc_net_tcp.md
Normal file
@@ -0,0 +1,186 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.proc_net_tcp"></a>
|
||||
|
||||
# jc.parsers.proc\_net\_tcp
|
||||
|
||||
jc - JSON Convert `/proc/net/tcp` and `proc/net/tcp6` file parser
|
||||
|
||||
IPv4 and IPv6 addresses are converted to standard notation unless the raw
|
||||
(--raw) option is used.
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat /proc/net/tcp | jc --proc
|
||||
|
||||
or
|
||||
|
||||
$ jc /proc/net/tcp
|
||||
|
||||
or
|
||||
|
||||
$ cat /proc/net/tcp | jc --proc-net-tcp
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('proc', proc_net_tcp_file)
|
||||
|
||||
or
|
||||
|
||||
import jc
|
||||
result = jc.parse('proc_net_tcp', proc_net_tcp_file)
|
||||
|
||||
Schema:
|
||||
|
||||
Field names and types gathered from the following:
|
||||
|
||||
https://www.kernel.org/doc/Documentation/networking/proc_net_tcp.txt
|
||||
|
||||
https://github.com/torvalds/linux/blob/master/net/ipv4/tcp_ipv4.c
|
||||
|
||||
https://github.com/torvalds/linux/blob/master/net/ipv6/tcp_ipv6.c
|
||||
|
||||
[
|
||||
{
|
||||
"entry": integer,
|
||||
"local_address": string,
|
||||
"local_port": integer,
|
||||
"remote_address": string,
|
||||
"remote_port": integer,
|
||||
"state": string,
|
||||
"tx_queue": string,
|
||||
"rx_queue": string,
|
||||
"timer_active": integer,
|
||||
"jiffies_until_timer_expires": string,
|
||||
"unrecovered_rto_timeouts": string,
|
||||
"uid": integer,
|
||||
"unanswered_0_window_probes": integer,
|
||||
"inode": integer,
|
||||
"sock_ref_count": integer,
|
||||
"sock_mem_loc": string,
|
||||
"retransmit_timeout": integer,
|
||||
"soft_clock_tick": integer,
|
||||
"ack_quick_pingpong": integer,
|
||||
"sending_congestion_window": integer,
|
||||
"slow_start_size_threshold": integer
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /proc/net/tcp | jc --proc -p
|
||||
[
|
||||
{
|
||||
"entry": "0",
|
||||
"local_address": "10.0.0.28",
|
||||
"local_port": 42082,
|
||||
"remote_address": "64.12.0.108",
|
||||
"remote_port": 80,
|
||||
"state": "04",
|
||||
"tx_queue": "00000001",
|
||||
"rx_queue": "00000000",
|
||||
"timer_active": 1,
|
||||
"jiffies_until_timer_expires": "00000015",
|
||||
"unrecovered_rto_timeouts": "00000000",
|
||||
"uid": 0,
|
||||
"unanswered_0_window_probes": 0,
|
||||
"inode": 0,
|
||||
"sock_ref_count": 3,
|
||||
"sock_mem_loc": "ffff8c7a0de930c0",
|
||||
"retransmit_timeout": 21,
|
||||
"soft_clock_tick": 4,
|
||||
"ack_quick_pingpong": 30,
|
||||
"sending_congestion_window": 10,
|
||||
"slow_start_size_threshold": -1
|
||||
},
|
||||
{
|
||||
"entry": "1",
|
||||
"local_address": "10.0.0.28",
|
||||
"local_port": 38864,
|
||||
"remote_address": "104.244.42.65",
|
||||
"remote_port": 80,
|
||||
"state": "06",
|
||||
"tx_queue": "00000000",
|
||||
"rx_queue": "00000000",
|
||||
"timer_active": 3,
|
||||
"jiffies_until_timer_expires": "000007C5",
|
||||
"unrecovered_rto_timeouts": "00000000",
|
||||
"uid": 0,
|
||||
"unanswered_0_window_probes": 0,
|
||||
"inode": 0,
|
||||
"sock_ref_count": 3,
|
||||
"sock_mem_loc": "ffff8c7a12d31aa0"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ cat /proc/net/tcp | jc --proc -p -r
|
||||
[
|
||||
{
|
||||
"entry": "1",
|
||||
"local_address": "1C00000A",
|
||||
"local_port": "A462",
|
||||
"remote_address": "6C000C40",
|
||||
"remote_port": "0050",
|
||||
"state": "04",
|
||||
"tx_queue": "00000001",
|
||||
"rx_queue": "00000000",
|
||||
"timer_active": "01",
|
||||
"jiffies_until_timer_expires": "00000015",
|
||||
"unrecovered_rto_timeouts": "00000000",
|
||||
"uid": "0",
|
||||
"unanswered_0_window_probes": "0",
|
||||
"inode": "0",
|
||||
"sock_ref_count": "3",
|
||||
"sock_mem_loc": "ffff8c7a0de930c0",
|
||||
"retransmit_timeout": "21",
|
||||
"soft_clock_tick": "4",
|
||||
"ack_quick_pingpong": "30",
|
||||
"sending_congestion_window": "10",
|
||||
"slow_start_size_threshold": "-1"
|
||||
},
|
||||
{
|
||||
"entry": "2",
|
||||
"local_address": "1C00000A",
|
||||
"local_port": "97D0",
|
||||
"remote_address": "412AF468",
|
||||
"remote_port": "0050",
|
||||
"state": "06",
|
||||
"tx_queue": "00000000",
|
||||
"rx_queue": "00000000",
|
||||
"timer_active": "03",
|
||||
"jiffies_until_timer_expires": "000007C5",
|
||||
"unrecovered_rto_timeouts": "00000000",
|
||||
"uid": "0",
|
||||
"unanswered_0_window_probes": "0",
|
||||
"inode": "0",
|
||||
"sock_ref_count": "3",
|
||||
"sock_mem_loc": "ffff8c7a12d31aa0"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
<a id="jc.parsers.proc_net_tcp.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> List[Dict]
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.0 by Alvin Solomon (alvinms01@gmail.com)
|
||||
83
docs/parsers/resolve_conf.md
Normal file
83
docs/parsers/resolve_conf.md
Normal file
@@ -0,0 +1,83 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.resolve_conf"></a>
|
||||
|
||||
# jc.parsers.resolve\_conf
|
||||
|
||||
jc - JSON Convert `/etc/resolve.conf` file parser
|
||||
|
||||
This parser may be more forgiving than the system parser. For example, if
|
||||
multiple `search` lists are defined, this parser will append all entries to
|
||||
the `search` field, while the system parser may only use the list from the
|
||||
last defined instance.
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat /etc/resolve.conf | jc --resolve-conf
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('resolve_conf', resolve_conf_output)
|
||||
|
||||
Schema:
|
||||
|
||||
{
|
||||
"domain": string,
|
||||
"search": [
|
||||
string
|
||||
],
|
||||
"nameservers": [
|
||||
string
|
||||
],
|
||||
"options": [
|
||||
string
|
||||
],
|
||||
"sortlist": [
|
||||
string
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /etc/resolve.conf | jc --resolve-conf -p
|
||||
{
|
||||
"search": [
|
||||
"eng.myprime.com",
|
||||
"dev.eng.myprime.com",
|
||||
"labs.myprime.com",
|
||||
"qa.myprime.com"
|
||||
],
|
||||
"nameservers": [
|
||||
"10.136.17.15"
|
||||
],
|
||||
"options": [
|
||||
"rotate",
|
||||
"ndots:1"
|
||||
]
|
||||
}
|
||||
|
||||
<a id="jc.parsers.resolve_conf.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> JSONDictType
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.0 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
@@ -184,4 +184,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.6 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.7 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
69
docs/parsers/swapon.md
Normal file
69
docs/parsers/swapon.md
Normal file
@@ -0,0 +1,69 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.swapon"></a>
|
||||
|
||||
# jc.parsers.swapon
|
||||
|
||||
jc - JSON Convert `swapon` command output parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ swapon | jc --swapon
|
||||
|
||||
or
|
||||
|
||||
$ jc swapon
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('swapon', swapon_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"name": string,
|
||||
"type": string,
|
||||
"size": integer,
|
||||
"used": integer,
|
||||
"priority": integer
|
||||
}
|
||||
]
|
||||
|
||||
Example:
|
||||
|
||||
$ swapon | jc --swapon
|
||||
[
|
||||
{
|
||||
"name": "/swapfile",
|
||||
"type": "file",
|
||||
"size": 1073741824,
|
||||
"used": 0,
|
||||
"priority": -2
|
||||
}
|
||||
]
|
||||
|
||||
<a id="jc.parsers.swapon.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> List[_Entry]
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux, freebsd
|
||||
|
||||
Version 1.0 by Roey Darwish Dror (roey.ghost@gmail.com)
|
||||
235
docs/parsers/tune2fs.md
Normal file
235
docs/parsers/tune2fs.md
Normal file
@@ -0,0 +1,235 @@
|
||||
[Home](https://kellyjonbrazil.github.io/jc/)
|
||||
<a id="jc.parsers.tune2fs"></a>
|
||||
|
||||
# jc.parsers.tune2fs
|
||||
|
||||
jc - JSON Convert `tune2fs -l` command output parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ tune2fs -l /dev/xvda4 | jc --tune2fs
|
||||
|
||||
or
|
||||
|
||||
$ jc tune2fs -l /dev/xvda4
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('tune2fs', tune2fs_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
{
|
||||
"version": string,
|
||||
"filesystem_volume_name": string,
|
||||
"last_mounted_on": string,
|
||||
"filesystem_uuid": string,
|
||||
"filesystem_magic_number": string,
|
||||
"filesystem_revision_number": string,
|
||||
"filesystem_features": [
|
||||
string
|
||||
],
|
||||
"filesystem_flags": string,
|
||||
"default_mount_options": string,
|
||||
"filesystem_state": string,
|
||||
"errors_behavior": string,
|
||||
"filesystem_os_type": string,
|
||||
"inode_count": integer,
|
||||
"block_count": integer,
|
||||
"reserved_block_count": integer,
|
||||
"overhead_clusters": integer,
|
||||
"free_blocks": integer,
|
||||
"free_inodes": integer,
|
||||
"first_block": integer,
|
||||
"block_size": integer,
|
||||
"fragment_size": integer,
|
||||
"group_descriptor_size": integer,
|
||||
"reserved_gdt_blocks": integer,
|
||||
"blocks_per_group": integer,
|
||||
"fragments_per_group": integer,
|
||||
"inodes_per_group": integer,
|
||||
"inode_blocks_per_group": integer,
|
||||
"flex_block_group_size": integer,
|
||||
"filesystem_created": string,
|
||||
"filesystem_created_epoch": integer,
|
||||
"filesystem_created_epoch_utc": integer,
|
||||
"last_mount_time": string,
|
||||
"last_mount_time_epoch": integer,
|
||||
"last_mount_time_epoch_utc": integer,
|
||||
"last_write_time": string,
|
||||
"last_write_time_epoch": integer,
|
||||
"last_write_time_epoch_utc": integer,
|
||||
"mount_count": integer,
|
||||
"maximum_mount_count": integer,
|
||||
"last_checked": string,
|
||||
"last_checked_epoch": integer,
|
||||
"last_checked_epoch_utc": integer,
|
||||
"check_interval": string,
|
||||
"lifetime_writes": string,
|
||||
"reserved_blocks_uid": string,
|
||||
"reserved_blocks_gid": string,
|
||||
"first_inode": integer,
|
||||
"inode_size": integer,
|
||||
"required_extra_isize": integer,
|
||||
"desired_extra_isize": integer,
|
||||
"journal_inode": integer,
|
||||
"default_directory_hash": string,
|
||||
"directory_hash_seed": string,
|
||||
"journal_backup": string,
|
||||
"checksum_type": string,
|
||||
"checksum": string
|
||||
}
|
||||
|
||||
Examples:
|
||||
|
||||
$ tune2fs | jc --tune2fs -p
|
||||
{
|
||||
"version": "1.46.2 (28-Feb-2021)",
|
||||
"filesystem_volume_name": "<none>",
|
||||
"last_mounted_on": "/home",
|
||||
"filesystem_uuid": "5fb78e1a-b214-44e2-a309-8e35116d8dd6",
|
||||
"filesystem_magic_number": "0xEF53",
|
||||
"filesystem_revision_number": "1 (dynamic)",
|
||||
"filesystem_features": [
|
||||
"has_journal",
|
||||
"ext_attr",
|
||||
"resize_inode",
|
||||
"dir_index",
|
||||
"filetype",
|
||||
"needs_recovery",
|
||||
"extent",
|
||||
"64bit",
|
||||
"flex_bg",
|
||||
"sparse_super",
|
||||
"large_file",
|
||||
"huge_file",
|
||||
"dir_nlink",
|
||||
"extra_isize",
|
||||
"metadata_csum"
|
||||
],
|
||||
"filesystem_flags": "signed_directory_hash",
|
||||
"default_mount_options": "user_xattr acl",
|
||||
"filesystem_state": "clean",
|
||||
"errors_behavior": "Continue",
|
||||
"filesystem_os_type": "Linux",
|
||||
"inode_count": 3932160,
|
||||
"block_count": 15728640,
|
||||
"reserved_block_count": 786432,
|
||||
"free_blocks": 15198453,
|
||||
"free_inodes": 3864620,
|
||||
"first_block": 0,
|
||||
"block_size": 4096,
|
||||
"fragment_size": 4096,
|
||||
"group_descriptor_size": 64,
|
||||
"reserved_gdt_blocks": 1024,
|
||||
"blocks_per_group": 32768,
|
||||
"fragments_per_group": 32768,
|
||||
"inodes_per_group": 8192,
|
||||
"inode_blocks_per_group": 512,
|
||||
"flex_block_group_size": 16,
|
||||
"filesystem_created": "Mon Apr 6 15:10:37 2020",
|
||||
"last_mount_time": "Mon Sep 19 15:16:20 2022",
|
||||
"last_write_time": "Mon Sep 19 15:16:20 2022",
|
||||
"mount_count": 14,
|
||||
"maximum_mount_count": -1,
|
||||
"last_checked": "Fri Apr 8 15:24:22 2022",
|
||||
"check_interval": "0 (<none>)",
|
||||
"lifetime_writes": "203 GB",
|
||||
"reserved_blocks_uid": "0 (user root)",
|
||||
"reserved_blocks_gid": "0 (group root)",
|
||||
"first_inode": 11,
|
||||
"inode_size": 256,
|
||||
"required_extra_isize": 32,
|
||||
"desired_extra_isize": 32,
|
||||
"journal_inode": 8,
|
||||
"default_directory_hash": "half_md4",
|
||||
"directory_hash_seed": "67d5358d-723d-4ce3-b3c0-30ddb433ad9e",
|
||||
"journal_backup": "inode blocks",
|
||||
"checksum_type": "crc32c",
|
||||
"checksum": "0x7809afff",
|
||||
"filesystem_created_epoch": 1586211037,
|
||||
"filesystem_created_epoch_utc": null,
|
||||
"last_mount_time_epoch": 1663625780,
|
||||
"last_mount_time_epoch_utc": null,
|
||||
"last_write_time_epoch": 1663625780,
|
||||
"last_write_time_epoch_utc": null,
|
||||
"last_checked_epoch": 1649456662,
|
||||
"last_checked_epoch_utc": null
|
||||
}
|
||||
|
||||
$ tune2fs | jc --tune2fs -p -r
|
||||
{
|
||||
"version": "1.46.2 (28-Feb-2021)",
|
||||
"filesystem_volume_name": "<none>",
|
||||
"last_mounted_on": "/home",
|
||||
"filesystem_uuid": "5fb78e1a-b214-44e2-a309-8e35116d8dd6",
|
||||
"filesystem_magic_number": "0xEF53",
|
||||
"filesystem_revision_number": "1 (dynamic)",
|
||||
"filesystem_features": "has_journal ext_attr resize_inode dir_index filetype needs_recovery extent 64bit flex_bg sparse_super large_file huge_file dir_nlink extra_isize metadata_csum",
|
||||
"filesystem_flags": "signed_directory_hash",
|
||||
"default_mount_options": "user_xattr acl",
|
||||
"filesystem_state": "clean",
|
||||
"errors_behavior": "Continue",
|
||||
"filesystem_os_type": "Linux",
|
||||
"inode_count": "3932160",
|
||||
"block_count": "15728640",
|
||||
"reserved_block_count": "786432",
|
||||
"free_blocks": "15198453",
|
||||
"free_inodes": "3864620",
|
||||
"first_block": "0",
|
||||
"block_size": "4096",
|
||||
"fragment_size": "4096",
|
||||
"group_descriptor_size": "64",
|
||||
"reserved_gdt_blocks": "1024",
|
||||
"blocks_per_group": "32768",
|
||||
"fragments_per_group": "32768",
|
||||
"inodes_per_group": "8192",
|
||||
"inode_blocks_per_group": "512",
|
||||
"flex_block_group_size": "16",
|
||||
"filesystem_created": "Mon Apr 6 15:10:37 2020",
|
||||
"last_mount_time": "Mon Sep 19 15:16:20 2022",
|
||||
"last_write_time": "Mon Sep 19 15:16:20 2022",
|
||||
"mount_count": "14",
|
||||
"maximum_mount_count": "-1",
|
||||
"last_checked": "Fri Apr 8 15:24:22 2022",
|
||||
"check_interval": "0 (<none>)",
|
||||
"lifetime_writes": "203 GB",
|
||||
"reserved_blocks_uid": "0 (user root)",
|
||||
"reserved_blocks_gid": "0 (group root)",
|
||||
"first_inode": "11",
|
||||
"inode_size": "256",
|
||||
"required_extra_isize": "32",
|
||||
"desired_extra_isize": "32",
|
||||
"journal_inode": "8",
|
||||
"default_directory_hash": "half_md4",
|
||||
"directory_hash_seed": "67d5358d-723d-4ce3-b3c0-30ddb433ad9e",
|
||||
"journal_backup": "inode blocks",
|
||||
"checksum_type": "crc32c",
|
||||
"checksum": "0x7809afff"
|
||||
}
|
||||
|
||||
<a id="jc.parsers.tune2fs.parse"></a>
|
||||
|
||||
### parse
|
||||
|
||||
```python
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> JSONDictType
|
||||
```
|
||||
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.0 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
@@ -7,7 +7,7 @@ jc - JSON Convert Version string output parser
|
||||
|
||||
Best-effort attempt to parse various styles of version numbers. This parser
|
||||
is based off of the version parser included in the CPython distutils
|
||||
libary.
|
||||
library.
|
||||
|
||||
If the version string conforms to some de facto-standard versioning rules
|
||||
followed by many developers a `strict` key will be present in the output
|
||||
|
||||
@@ -149,4 +149,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.3 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.4 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
@@ -123,4 +123,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux
|
||||
|
||||
Version 1.2 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.3 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
@@ -158,4 +158,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, aix, freebsd
|
||||
|
||||
Version 1.7 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.8 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
@@ -433,4 +433,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.2 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.3 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
@@ -98,4 +98,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, win32, aix, freebsd
|
||||
|
||||
Version 1.7 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
Version 1.9 by Kelly Brazil (kellyjonbrazil@gmail.com)
|
||||
|
||||
@@ -31,22 +31,24 @@ Schema:
|
||||
"current_height": integer,
|
||||
"maximum_width": integer,
|
||||
"maximum_height": integer,
|
||||
"associated_device": {
|
||||
"associated_modes": [
|
||||
{
|
||||
"resolution_width": integer,
|
||||
"resolution_height": integer,
|
||||
"is_high_resolution": boolean,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": float,
|
||||
"is_current": boolean,
|
||||
"is_preferred": boolean
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"devices": [
|
||||
{
|
||||
"modes": [
|
||||
{
|
||||
"resolution_width": integer,
|
||||
"resolution_height": integer,
|
||||
"is_high_resolution": boolean,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": float,
|
||||
"is_current": boolean,
|
||||
"is_preferred": boolean
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"is_connected": boolean,
|
||||
"is_primary": boolean,
|
||||
"device_name": string,
|
||||
@@ -62,24 +64,6 @@ Schema:
|
||||
"rotation": string,
|
||||
"reflection": string
|
||||
}
|
||||
],
|
||||
"unassociated_devices": [
|
||||
{
|
||||
"associated_modes": [
|
||||
{
|
||||
"resolution_width": integer,
|
||||
"resolution_height": integer,
|
||||
"is_high_resolution": boolean,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": float,
|
||||
"is_current": boolean,
|
||||
"is_preferred": boolean
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -96,53 +80,54 @@ Examples:
|
||||
"current_height": 1080,
|
||||
"maximum_width": 32767,
|
||||
"maximum_height": 32767,
|
||||
"associated_device": {
|
||||
"associated_modes": [
|
||||
{
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 60.03,
|
||||
"is_current": true,
|
||||
"is_preferred": true
|
||||
},
|
||||
{
|
||||
"frequency": 59.93,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"resolution_width": 1680,
|
||||
"resolution_height": 1050,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 59.88,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"is_connected": true,
|
||||
"is_primary": true,
|
||||
"device_name": "eDP1",
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"offset_width": 0,
|
||||
"offset_height": 0,
|
||||
"dimension_width": 310,
|
||||
"dimension_height": 170,
|
||||
"rotation": "normal",
|
||||
"reflection": "normal"
|
||||
}
|
||||
"devices": [
|
||||
{
|
||||
"modes": [
|
||||
{
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 60.03,
|
||||
"is_current": true,
|
||||
"is_preferred": true
|
||||
},
|
||||
{
|
||||
"frequency": 59.93,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"resolution_width": 1680,
|
||||
"resolution_height": 1050,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 59.88,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"is_connected": true,
|
||||
"is_primary": true,
|
||||
"device_name": "eDP1",
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"offset_width": 0,
|
||||
"offset_height": 0,
|
||||
"dimension_width": 310,
|
||||
"dimension_height": 170,
|
||||
"rotation": "normal",
|
||||
"reflection": "normal"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"unassociated_devices": []
|
||||
]
|
||||
}
|
||||
|
||||
$ xrandr --properties | jc --xrandr -p
|
||||
@@ -156,56 +141,57 @@ Examples:
|
||||
"current_height": 1080,
|
||||
"maximum_width": 32767,
|
||||
"maximum_height": 32767,
|
||||
"associated_device": {
|
||||
"associated_modes": [
|
||||
{
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 60.03,
|
||||
"is_current": true,
|
||||
"is_preferred": true
|
||||
},
|
||||
{
|
||||
"frequency": 59.93,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"resolution_width": 1680,
|
||||
"resolution_height": 1050,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 59.88,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"is_connected": true,
|
||||
"is_primary": true,
|
||||
"device_name": "eDP1",
|
||||
"model_name": "ASUS VW193S",
|
||||
"product_id": "54297",
|
||||
"serial_number": "78L8021107",
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"offset_width": 0,
|
||||
"offset_height": 0,
|
||||
"dimension_width": 310,
|
||||
"dimension_height": 170,
|
||||
"rotation": "normal",
|
||||
"reflection": "normal"
|
||||
}
|
||||
"devices": [
|
||||
{
|
||||
"modes": [
|
||||
{
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 60.03,
|
||||
"is_current": true,
|
||||
"is_preferred": true
|
||||
},
|
||||
{
|
||||
"frequency": 59.93,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"resolution_width": 1680,
|
||||
"resolution_height": 1050,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 59.88,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"is_connected": true,
|
||||
"is_primary": true,
|
||||
"device_name": "eDP1",
|
||||
"model_name": "ASUS VW193S",
|
||||
"product_id": "54297",
|
||||
"serial_number": "78L8021107",
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"offset_width": 0,
|
||||
"offset_height": 0,
|
||||
"dimension_width": 310,
|
||||
"dimension_height": 170,
|
||||
"rotation": "normal",
|
||||
"reflection": "normal"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"unassociated_devices": []
|
||||
]
|
||||
}
|
||||
|
||||
<a id="jc.parsers.xrandr.parse"></a>
|
||||
@@ -231,4 +217,4 @@ Returns:
|
||||
### Parser Information
|
||||
Compatibility: linux, darwin, cygwin, aix, freebsd
|
||||
|
||||
Version 1.2 by Kevin Lyter (lyter_git at sent.com)
|
||||
Version 1.4 by Kevin Lyter (code (at) lyterk.com)
|
||||
|
||||
@@ -9,6 +9,7 @@
|
||||
* [convert\_to\_int](#jc.utils.convert_to_int)
|
||||
* [convert\_to\_float](#jc.utils.convert_to_float)
|
||||
* [convert\_to\_bool](#jc.utils.convert_to_bool)
|
||||
* [convert\_size\_to\_int](#jc.utils.convert_size_to_int)
|
||||
* [input\_type\_check](#jc.utils.input_type_check)
|
||||
* [timestamp](#jc.utils.timestamp)
|
||||
* [\_\_init\_\_](#jc.utils.timestamp.__init__)
|
||||
@@ -178,6 +179,48 @@ Returns:
|
||||
True/False False unless a 'truthy' number or string is found
|
||||
('y', 'yes', 'true', '1', 1, -1, etc.)
|
||||
|
||||
<a id="jc.utils.convert_size_to_int"></a>
|
||||
|
||||
### convert\_size\_to\_int
|
||||
|
||||
```python
|
||||
def convert_size_to_int(size: str, binary: bool = False) -> Optional[int]
|
||||
```
|
||||
|
||||
Parse a human readable data size and return the number of bytes.
|
||||
|
||||
Parameters:
|
||||
|
||||
size: (string) The human readable file size to parse.
|
||||
binary: (boolean) `True` to use binary multiples of bytes
|
||||
(base-2) for ambiguous unit symbols and names,
|
||||
`False` to use decimal multiples of bytes (base-10).
|
||||
Returns:
|
||||
|
||||
integer/None Integer if successful conversion, otherwise None
|
||||
|
||||
This function knows how to parse sizes in bytes, kilobytes, megabytes,
|
||||
gigabytes, terabytes and petabytes. Some examples:
|
||||
|
||||
>>> convert_size_to_int('42')
|
||||
42
|
||||
>>> convert_size_to_int('13b')
|
||||
13
|
||||
>>> convert_size_to_int('5 bytes')
|
||||
5
|
||||
>>> convert_size_to_int('1 KB')
|
||||
1000
|
||||
>>> convert_size_to_int('1 kilobyte')
|
||||
1000
|
||||
>>> convert_size_to_int('1 KiB')
|
||||
1024
|
||||
>>> convert_size_to_int('1 KB', binary=True)
|
||||
1024
|
||||
>>> convert_size_to_int('1.5 GB')
|
||||
1500000000
|
||||
>>> convert_size_to_int('1.5 GB', binary=True)
|
||||
1610612736
|
||||
|
||||
<a id="jc.utils.input_type_check"></a>
|
||||
|
||||
### input\_type\_check
|
||||
|
||||
@@ -45,10 +45,6 @@ __version_info__ = tuple(int(segment) for segment in __version__.split("."))
|
||||
import sys
|
||||
import os
|
||||
|
||||
PY3 = sys.version_info[0] == 3
|
||||
|
||||
if PY3:
|
||||
unicode = str
|
||||
|
||||
if sys.platform.startswith('java'):
|
||||
import platform
|
||||
@@ -490,10 +486,7 @@ def _get_win_folder_from_registry(csidl_name):
|
||||
registry for this guarantees us the correct answer for all CSIDL_*
|
||||
names.
|
||||
"""
|
||||
if PY3:
|
||||
import winreg as _winreg
|
||||
else:
|
||||
import _winreg
|
||||
import winreg as _winreg
|
||||
|
||||
shell_folder_name = {
|
||||
"CSIDL_APPDATA": "AppData",
|
||||
|
||||
45
jc/cli.py
45
jc/cli.py
@@ -145,33 +145,34 @@ class JcCli():
|
||||
JC_COLORS=blue,brightblack,magenta,green
|
||||
JC_COLORS=default,default,default,default
|
||||
"""
|
||||
input_error = False
|
||||
env_colors = os.getenv('JC_COLORS')
|
||||
if PYGMENTS_INSTALLED:
|
||||
input_error = False
|
||||
env_colors = os.getenv('JC_COLORS')
|
||||
|
||||
if env_colors:
|
||||
color_list = env_colors.split(',')
|
||||
else:
|
||||
color_list = ['default', 'default', 'default', 'default']
|
||||
if env_colors:
|
||||
color_list = env_colors.split(',')
|
||||
else:
|
||||
color_list = ['default', 'default', 'default', 'default']
|
||||
|
||||
if len(color_list) != 4:
|
||||
input_error = True
|
||||
|
||||
for color in color_list:
|
||||
if color != 'default' and color not in PYGMENT_COLOR:
|
||||
if len(color_list) != 4:
|
||||
input_error = True
|
||||
|
||||
# if there is an issue with the env variable, just set all colors to default and move on
|
||||
if input_error:
|
||||
utils.warning_message(['Could not parse JC_COLORS environment variable'])
|
||||
color_list = ['default', 'default', 'default', 'default']
|
||||
for color in color_list:
|
||||
if color != 'default' and color not in PYGMENT_COLOR:
|
||||
input_error = True
|
||||
|
||||
# Try the color set in the JC_COLORS env variable first. If it is set to default, then fall back to default colors
|
||||
self.custom_colors = {
|
||||
Name.Tag: f'bold {PYGMENT_COLOR[color_list[0]]}' if color_list[0] != 'default' else f"bold {PYGMENT_COLOR['blue']}", # key names
|
||||
Keyword: PYGMENT_COLOR[color_list[1]] if color_list[1] != 'default' else PYGMENT_COLOR['brightblack'], # true, false, null
|
||||
Number: PYGMENT_COLOR[color_list[2]] if color_list[2] != 'default' else PYGMENT_COLOR['magenta'], # numbers
|
||||
String: PYGMENT_COLOR[color_list[3]] if color_list[3] != 'default' else PYGMENT_COLOR['green'] # strings
|
||||
}
|
||||
# if there is an issue with the env variable, just set all colors to default and move on
|
||||
if input_error:
|
||||
utils.warning_message(['Could not parse JC_COLORS environment variable'])
|
||||
color_list = ['default', 'default', 'default', 'default']
|
||||
|
||||
# Try the color set in the JC_COLORS env variable first. If it is set to default, then fall back to default colors
|
||||
self.custom_colors = {
|
||||
Name.Tag: f'bold {PYGMENT_COLOR[color_list[0]]}' if color_list[0] != 'default' else f"bold {PYGMENT_COLOR['blue']}", # key names
|
||||
Keyword: PYGMENT_COLOR[color_list[1]] if color_list[1] != 'default' else PYGMENT_COLOR['brightblack'], # true, false, null
|
||||
Number: PYGMENT_COLOR[color_list[2]] if color_list[2] != 'default' else PYGMENT_COLOR['magenta'], # numbers
|
||||
String: PYGMENT_COLOR[color_list[3]] if color_list[3] != 'default' else PYGMENT_COLOR['green'] # strings
|
||||
}
|
||||
|
||||
def set_mono(self) -> None:
|
||||
"""
|
||||
|
||||
17
jc/lib.py
17
jc/lib.py
@@ -9,7 +9,7 @@ from .jc_types import ParserInfoType, JSONDictType
|
||||
from jc import appdirs
|
||||
|
||||
|
||||
__version__ = '1.23.3'
|
||||
__version__ = '1.24.0'
|
||||
|
||||
parsers: List[str] = [
|
||||
'acpi',
|
||||
@@ -34,6 +34,7 @@ parsers: List[str] = [
|
||||
'csv-s',
|
||||
'date',
|
||||
'datetime-iso',
|
||||
'debconf-show',
|
||||
'df',
|
||||
'dig',
|
||||
'dir',
|
||||
@@ -43,6 +44,7 @@ parsers: List[str] = [
|
||||
'email-address',
|
||||
'env',
|
||||
'file',
|
||||
'find',
|
||||
'findmnt',
|
||||
'finger',
|
||||
'free',
|
||||
@@ -57,6 +59,7 @@ parsers: List[str] = [
|
||||
'hashsum',
|
||||
'hciconfig',
|
||||
'history',
|
||||
'host',
|
||||
'hosts',
|
||||
'id',
|
||||
'ifconfig',
|
||||
@@ -66,7 +69,7 @@ parsers: List[str] = [
|
||||
'iostat-s',
|
||||
'ip-address',
|
||||
'iptables',
|
||||
'iso-datetime',
|
||||
'ip-route',
|
||||
'iw-scan',
|
||||
'iwconfig',
|
||||
'jar-manifest',
|
||||
@@ -77,6 +80,7 @@ parsers: List[str] = [
|
||||
'ls',
|
||||
'ls-s',
|
||||
'lsattr',
|
||||
'lsb-release',
|
||||
'lsblk',
|
||||
'lsmod',
|
||||
'lsof',
|
||||
@@ -89,9 +93,11 @@ parsers: List[str] = [
|
||||
'mpstat-s',
|
||||
'netstat',
|
||||
'nmcli',
|
||||
'nsd-control',
|
||||
'ntpq',
|
||||
'openvpn',
|
||||
'os-prober',
|
||||
'os-release',
|
||||
'passwd',
|
||||
'pci-ids',
|
||||
'pgpass',
|
||||
@@ -101,10 +107,13 @@ parsers: List[str] = [
|
||||
'ping-s',
|
||||
'pip-list',
|
||||
'pip-show',
|
||||
'pkg-index-apk',
|
||||
'pkg-index-deb',
|
||||
'plist',
|
||||
'postconf',
|
||||
'proc',
|
||||
'proc-buddyinfo',
|
||||
'proc-cmdline',
|
||||
'proc-consoles',
|
||||
'proc-cpuinfo',
|
||||
'proc-crypto',
|
||||
@@ -143,6 +152,7 @@ parsers: List[str] = [
|
||||
'proc-net-packet',
|
||||
'proc-net-protocols',
|
||||
'proc-net-route',
|
||||
'proc-net-tcp',
|
||||
'proc-net-unix',
|
||||
'proc-pid-fdinfo',
|
||||
'proc-pid-io',
|
||||
@@ -154,6 +164,7 @@ parsers: List[str] = [
|
||||
'proc-pid-statm',
|
||||
'proc-pid-status',
|
||||
'ps',
|
||||
'resolve-conf',
|
||||
'route',
|
||||
'rpm-qi',
|
||||
'rsync',
|
||||
@@ -167,6 +178,7 @@ parsers: List[str] = [
|
||||
'sshd-conf',
|
||||
'stat',
|
||||
'stat-s',
|
||||
'swapon',
|
||||
'sysctl',
|
||||
'syslog',
|
||||
'syslog-s',
|
||||
@@ -185,6 +197,7 @@ parsers: List[str] = [
|
||||
'top-s',
|
||||
'tracepath',
|
||||
'traceroute',
|
||||
'tune2fs',
|
||||
'udevadm',
|
||||
'ufw',
|
||||
'ufw-appinfo',
|
||||
|
||||
@@ -227,7 +227,7 @@ import jc.utils
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.6'
|
||||
version = '1.7'
|
||||
description = '`acpi` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -337,19 +337,15 @@ def parse(data, raw=False, quiet=False):
|
||||
output_line['state'] = 'Not charging'
|
||||
output_line['charge_percent'] = line.split()[-1].rstrip('%,')
|
||||
|
||||
if 'Charging' in line \
|
||||
or 'Discharging' in line \
|
||||
or 'Full' in line:
|
||||
|
||||
if any(word in line for word in ('Charging', 'Discharging', 'Full')):
|
||||
output_line['state'] = line.split()[2][:-1]
|
||||
output_line['charge_percent'] = line.split()[3].rstrip('%,')
|
||||
if 'will never fully discharge' in line:
|
||||
if 'will never fully discharge' in line or 'rate information unavailable' in line:
|
||||
pass
|
||||
elif 'rate information unavailable' not in line:
|
||||
if 'Charging' in line:
|
||||
output_line['until_charged'] = line.split()[4]
|
||||
if 'Discharging' in line:
|
||||
output_line['charge_remaining'] = line.split()[4]
|
||||
elif 'Charging' in line:
|
||||
output_line['until_charged'] = line.split()[4]
|
||||
elif 'Discharging' in line:
|
||||
output_line['charge_remaining'] = line.split()[4]
|
||||
|
||||
if 'design capacity' in line:
|
||||
output_line['design_capacity_mah'] = line.split()[4]
|
||||
@@ -359,10 +355,7 @@ def parse(data, raw=False, quiet=False):
|
||||
if obj_type == 'Adapter':
|
||||
output_line['type'] = obj_type
|
||||
output_line['id'] = obj_id
|
||||
if 'on-line' in line:
|
||||
output_line['on-line'] = True
|
||||
else:
|
||||
output_line['on-line'] = False
|
||||
output_line['on-line'] = 'on-line' in line
|
||||
|
||||
if obj_type == 'Thermal':
|
||||
output_line['type'] = obj_type
|
||||
|
||||
@@ -5,7 +5,7 @@ import socket
|
||||
import struct
|
||||
|
||||
from ._errors import unwrap
|
||||
from ._types import byte_cls, bytes_to_list, str_cls, type_name
|
||||
from ._types import type_name
|
||||
|
||||
|
||||
def inet_ntop(address_family, packed_ip):
|
||||
@@ -33,7 +33,7 @@ def inet_ntop(address_family, packed_ip):
|
||||
repr(address_family)
|
||||
))
|
||||
|
||||
if not isinstance(packed_ip, byte_cls):
|
||||
if not isinstance(packed_ip, bytes):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
packed_ip must be a byte string, not %s
|
||||
@@ -52,7 +52,7 @@ def inet_ntop(address_family, packed_ip):
|
||||
))
|
||||
|
||||
if address_family == socket.AF_INET:
|
||||
return '%d.%d.%d.%d' % tuple(bytes_to_list(packed_ip))
|
||||
return '%d.%d.%d.%d' % tuple(list(packed_ip))
|
||||
|
||||
octets = struct.unpack(b'!HHHHHHHH', packed_ip)
|
||||
|
||||
@@ -106,7 +106,7 @@ def inet_pton(address_family, ip_string):
|
||||
repr(address_family)
|
||||
))
|
||||
|
||||
if not isinstance(ip_string, str_cls):
|
||||
if not isinstance(ip_string, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
ip_string must be a unicode string, not %s
|
||||
|
||||
@@ -13,25 +13,16 @@ from __future__ import unicode_literals, division, absolute_import, print_functi
|
||||
from encodings import idna # noqa
|
||||
import codecs
|
||||
import re
|
||||
import sys
|
||||
|
||||
from ._errors import unwrap
|
||||
from ._types import byte_cls, str_cls, type_name, bytes_to_list, int_types
|
||||
from ._types import type_name
|
||||
|
||||
if sys.version_info < (3,):
|
||||
from urlparse import urlsplit, urlunsplit
|
||||
from urllib import (
|
||||
quote as urlquote,
|
||||
unquote as unquote_to_bytes,
|
||||
)
|
||||
|
||||
else:
|
||||
from urllib.parse import (
|
||||
quote as urlquote,
|
||||
unquote_to_bytes,
|
||||
urlsplit,
|
||||
urlunsplit,
|
||||
)
|
||||
from urllib.parse import (
|
||||
quote as urlquote,
|
||||
unquote_to_bytes,
|
||||
urlsplit,
|
||||
urlunsplit,
|
||||
)
|
||||
|
||||
|
||||
def iri_to_uri(value, normalize=False):
|
||||
@@ -48,7 +39,7 @@ def iri_to_uri(value, normalize=False):
|
||||
A byte string of the ASCII-encoded URI
|
||||
"""
|
||||
|
||||
if not isinstance(value, str_cls):
|
||||
if not isinstance(value, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
value must be a unicode string, not %s
|
||||
@@ -57,19 +48,7 @@ def iri_to_uri(value, normalize=False):
|
||||
))
|
||||
|
||||
scheme = None
|
||||
# Python 2.6 doesn't split properly is the URL doesn't start with http:// or https://
|
||||
if sys.version_info < (2, 7) and not value.startswith('http://') and not value.startswith('https://'):
|
||||
real_prefix = None
|
||||
prefix_match = re.match('^[^:]*://', value)
|
||||
if prefix_match:
|
||||
real_prefix = prefix_match.group(0)
|
||||
value = 'http://' + value[len(real_prefix):]
|
||||
parsed = urlsplit(value)
|
||||
if real_prefix:
|
||||
value = real_prefix + value[7:]
|
||||
scheme = _urlquote(real_prefix[:-3])
|
||||
else:
|
||||
parsed = urlsplit(value)
|
||||
parsed = urlsplit(value)
|
||||
|
||||
if scheme is None:
|
||||
scheme = _urlquote(parsed.scheme)
|
||||
@@ -81,7 +60,7 @@ def iri_to_uri(value, normalize=False):
|
||||
password = _urlquote(parsed.password, safe='!$&\'()*+,;=')
|
||||
port = parsed.port
|
||||
if port is not None:
|
||||
port = str_cls(port).encode('ascii')
|
||||
port = str(port).encode('ascii')
|
||||
|
||||
netloc = b''
|
||||
if username is not None:
|
||||
@@ -112,7 +91,7 @@ def iri_to_uri(value, normalize=False):
|
||||
path = ''
|
||||
|
||||
output = urlunsplit((scheme, netloc, path, query, fragment))
|
||||
if isinstance(output, str_cls):
|
||||
if isinstance(output, str):
|
||||
output = output.encode('latin1')
|
||||
return output
|
||||
|
||||
@@ -128,7 +107,7 @@ def uri_to_iri(value):
|
||||
A unicode string of the IRI
|
||||
"""
|
||||
|
||||
if not isinstance(value, byte_cls):
|
||||
if not isinstance(value, bytes):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
value must be a byte string, not %s
|
||||
@@ -148,7 +127,7 @@ def uri_to_iri(value):
|
||||
if hostname:
|
||||
hostname = hostname.decode('idna')
|
||||
port = parsed.port
|
||||
if port and not isinstance(port, int_types):
|
||||
if port and not isinstance(port, int):
|
||||
port = port.decode('ascii')
|
||||
|
||||
netloc = ''
|
||||
@@ -160,7 +139,7 @@ def uri_to_iri(value):
|
||||
if hostname is not None:
|
||||
netloc += hostname
|
||||
if port is not None:
|
||||
netloc += ':' + str_cls(port)
|
||||
netloc += ':' + str(port)
|
||||
|
||||
path = _urlunquote(parsed.path, remap=['/'], preserve=True)
|
||||
query = _urlunquote(parsed.query, remap=['&', '='], preserve=True)
|
||||
@@ -182,7 +161,7 @@ def _iri_utf8_errors_handler(exc):
|
||||
resume at)
|
||||
"""
|
||||
|
||||
bytes_as_ints = bytes_to_list(exc.object[exc.start:exc.end])
|
||||
bytes_as_ints = list(exc.object[exc.start:exc.end])
|
||||
replacements = ['%%%02x' % num for num in bytes_as_ints]
|
||||
return (''.join(replacements), exc.end)
|
||||
|
||||
@@ -230,7 +209,7 @@ def _urlquote(string, safe=''):
|
||||
string = re.sub('%[0-9a-fA-F]{2}', _extract_escape, string)
|
||||
|
||||
output = urlquote(string.encode('utf-8'), safe=safe.encode('utf-8'))
|
||||
if not isinstance(output, byte_cls):
|
||||
if not isinstance(output, bytes):
|
||||
output = output.encode('ascii')
|
||||
|
||||
# Restore the existing quoted values that we extracted
|
||||
|
||||
@@ -1,135 +0,0 @@
|
||||
# Copyright (c) 2009 Raymond Hettinger
|
||||
#
|
||||
# Permission is hereby granted, free of charge, to any person
|
||||
# obtaining a copy of this software and associated documentation files
|
||||
# (the "Software"), to deal in the Software without restriction,
|
||||
# including without limitation the rights to use, copy, modify, merge,
|
||||
# publish, distribute, sublicense, and/or sell copies of the Software,
|
||||
# and to permit persons to whom the Software is furnished to do so,
|
||||
# subject to the following conditions:
|
||||
#
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
#
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
|
||||
# OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
|
||||
# HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
|
||||
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
|
||||
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
|
||||
# OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
import sys
|
||||
|
||||
if not sys.version_info < (2, 7):
|
||||
|
||||
from collections import OrderedDict
|
||||
|
||||
else:
|
||||
|
||||
from UserDict import DictMixin
|
||||
|
||||
class OrderedDict(dict, DictMixin):
|
||||
|
||||
def __init__(self, *args, **kwds):
|
||||
if len(args) > 1:
|
||||
raise TypeError('expected at most 1 arguments, got %d' % len(args))
|
||||
try:
|
||||
self.__end
|
||||
except AttributeError:
|
||||
self.clear()
|
||||
self.update(*args, **kwds)
|
||||
|
||||
def clear(self):
|
||||
self.__end = end = []
|
||||
end += [None, end, end] # sentinel node for doubly linked list
|
||||
self.__map = {} # key --> [key, prev, next]
|
||||
dict.clear(self)
|
||||
|
||||
def __setitem__(self, key, value):
|
||||
if key not in self:
|
||||
end = self.__end
|
||||
curr = end[1]
|
||||
curr[2] = end[1] = self.__map[key] = [key, curr, end]
|
||||
dict.__setitem__(self, key, value)
|
||||
|
||||
def __delitem__(self, key):
|
||||
dict.__delitem__(self, key)
|
||||
key, prev, next_ = self.__map.pop(key)
|
||||
prev[2] = next_
|
||||
next_[1] = prev
|
||||
|
||||
def __iter__(self):
|
||||
end = self.__end
|
||||
curr = end[2]
|
||||
while curr is not end:
|
||||
yield curr[0]
|
||||
curr = curr[2]
|
||||
|
||||
def __reversed__(self):
|
||||
end = self.__end
|
||||
curr = end[1]
|
||||
while curr is not end:
|
||||
yield curr[0]
|
||||
curr = curr[1]
|
||||
|
||||
def popitem(self, last=True):
|
||||
if not self:
|
||||
raise KeyError('dictionary is empty')
|
||||
if last:
|
||||
key = reversed(self).next()
|
||||
else:
|
||||
key = iter(self).next()
|
||||
value = self.pop(key)
|
||||
return key, value
|
||||
|
||||
def __reduce__(self):
|
||||
items = [[k, self[k]] for k in self]
|
||||
tmp = self.__map, self.__end
|
||||
del self.__map, self.__end
|
||||
inst_dict = vars(self).copy()
|
||||
self.__map, self.__end = tmp
|
||||
if inst_dict:
|
||||
return (self.__class__, (items,), inst_dict)
|
||||
return self.__class__, (items,)
|
||||
|
||||
def keys(self):
|
||||
return list(self)
|
||||
|
||||
setdefault = DictMixin.setdefault
|
||||
update = DictMixin.update
|
||||
pop = DictMixin.pop
|
||||
values = DictMixin.values
|
||||
items = DictMixin.items
|
||||
iterkeys = DictMixin.iterkeys
|
||||
itervalues = DictMixin.itervalues
|
||||
iteritems = DictMixin.iteritems
|
||||
|
||||
def __repr__(self):
|
||||
if not self:
|
||||
return '%s()' % (self.__class__.__name__,)
|
||||
return '%s(%r)' % (self.__class__.__name__, self.items())
|
||||
|
||||
def copy(self):
|
||||
return self.__class__(self)
|
||||
|
||||
@classmethod
|
||||
def fromkeys(cls, iterable, value=None):
|
||||
d = cls()
|
||||
for key in iterable:
|
||||
d[key] = value
|
||||
return d
|
||||
|
||||
def __eq__(self, other):
|
||||
if isinstance(other, OrderedDict):
|
||||
if len(self) != len(other):
|
||||
return False
|
||||
for p, q in zip(self.items(), other.items()):
|
||||
if p != q:
|
||||
return False
|
||||
return True
|
||||
return dict.__eq__(self, other)
|
||||
|
||||
def __ne__(self, other):
|
||||
return not self == other
|
||||
|
||||
@@ -2,28 +2,10 @@
|
||||
from __future__ import unicode_literals, division, absolute_import, print_function
|
||||
|
||||
import inspect
|
||||
import sys
|
||||
|
||||
|
||||
if sys.version_info < (3,):
|
||||
str_cls = unicode # noqa
|
||||
byte_cls = str
|
||||
int_types = (int, long) # noqa
|
||||
|
||||
def bytes_to_list(byte_string):
|
||||
return [ord(b) for b in byte_string]
|
||||
|
||||
chr_cls = chr
|
||||
|
||||
else:
|
||||
str_cls = str
|
||||
byte_cls = bytes
|
||||
int_types = int
|
||||
|
||||
bytes_to_list = list
|
||||
|
||||
def chr_cls(num):
|
||||
return bytes([num])
|
||||
def chr_cls(num):
|
||||
return bytes([num])
|
||||
|
||||
|
||||
def type_name(value):
|
||||
|
||||
@@ -48,8 +48,10 @@ Other type classes are defined that help compose the types listed above.
|
||||
|
||||
from __future__ import unicode_literals, division, absolute_import, print_function
|
||||
|
||||
from collections import OrderedDict
|
||||
from datetime import datetime, timedelta
|
||||
from fractions import Fraction
|
||||
from io import BytesIO
|
||||
import binascii
|
||||
import copy
|
||||
import math
|
||||
@@ -58,22 +60,10 @@ import sys
|
||||
|
||||
from . import _teletex_codec
|
||||
from ._errors import unwrap
|
||||
from ._ordereddict import OrderedDict
|
||||
from ._types import type_name, str_cls, byte_cls, int_types, chr_cls
|
||||
from ._types import type_name, chr_cls
|
||||
from .parser import _parse, _dump_header
|
||||
from .util import int_to_bytes, int_from_bytes, timezone, extended_datetime, create_timezone, utc_with_dst
|
||||
|
||||
if sys.version_info <= (3,):
|
||||
from cStringIO import StringIO as BytesIO
|
||||
|
||||
range = xrange # noqa
|
||||
_PY2 = True
|
||||
|
||||
else:
|
||||
from io import BytesIO
|
||||
|
||||
_PY2 = False
|
||||
|
||||
|
||||
_teletex_codec.register()
|
||||
|
||||
@@ -220,7 +210,7 @@ class Asn1Value(object):
|
||||
An instance of the current class
|
||||
"""
|
||||
|
||||
if not isinstance(encoded_data, byte_cls):
|
||||
if not isinstance(encoded_data, bytes):
|
||||
raise TypeError('encoded_data must be a byte string, not %s' % type_name(encoded_data))
|
||||
|
||||
spec = None
|
||||
@@ -291,7 +281,7 @@ class Asn1Value(object):
|
||||
cls = self.__class__
|
||||
# Allow explicit to be specified as a simple 2-element tuple
|
||||
# instead of requiring the user make a nested tuple
|
||||
if cls.explicit is not None and isinstance(cls.explicit[0], int_types):
|
||||
if cls.explicit is not None and isinstance(cls.explicit[0], int):
|
||||
cls.explicit = (cls.explicit, )
|
||||
if hasattr(cls, '_setup'):
|
||||
self._setup()
|
||||
@@ -299,7 +289,7 @@ class Asn1Value(object):
|
||||
|
||||
# Normalize tagging values
|
||||
if explicit is not None:
|
||||
if isinstance(explicit, int_types):
|
||||
if isinstance(explicit, int):
|
||||
if class_ is None:
|
||||
class_ = 'context'
|
||||
explicit = (class_, explicit)
|
||||
@@ -309,7 +299,7 @@ class Asn1Value(object):
|
||||
tag = None
|
||||
|
||||
if implicit is not None:
|
||||
if isinstance(implicit, int_types):
|
||||
if isinstance(implicit, int):
|
||||
if class_ is None:
|
||||
class_ = 'context'
|
||||
implicit = (class_, implicit)
|
||||
@@ -336,11 +326,11 @@ class Asn1Value(object):
|
||||
|
||||
if explicit is not None:
|
||||
# Ensure we have a tuple of 2-element tuples
|
||||
if len(explicit) == 2 and isinstance(explicit[1], int_types):
|
||||
if len(explicit) == 2 and isinstance(explicit[1], int):
|
||||
explicit = (explicit, )
|
||||
for class_, tag in explicit:
|
||||
invalid_class = None
|
||||
if isinstance(class_, int_types):
|
||||
if isinstance(class_, int):
|
||||
if class_ not in CLASS_NUM_TO_NAME_MAP:
|
||||
invalid_class = class_
|
||||
else:
|
||||
@@ -356,7 +346,7 @@ class Asn1Value(object):
|
||||
repr(invalid_class)
|
||||
))
|
||||
if tag is not None:
|
||||
if not isinstance(tag, int_types):
|
||||
if not isinstance(tag, int):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
explicit tag must be an integer, not %s
|
||||
@@ -379,7 +369,7 @@ class Asn1Value(object):
|
||||
repr(class_)
|
||||
))
|
||||
if tag is not None:
|
||||
if not isinstance(tag, int_types):
|
||||
if not isinstance(tag, int):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
implicit tag must be an integer, not %s
|
||||
@@ -445,10 +435,7 @@ class Asn1Value(object):
|
||||
A unicode string
|
||||
"""
|
||||
|
||||
if _PY2:
|
||||
return self.__bytes__()
|
||||
else:
|
||||
return self.__unicode__()
|
||||
return self.__unicode__()
|
||||
|
||||
def __repr__(self):
|
||||
"""
|
||||
@@ -456,10 +443,7 @@ class Asn1Value(object):
|
||||
A unicode string
|
||||
"""
|
||||
|
||||
if _PY2:
|
||||
return '<%s %s b%s>' % (type_name(self), id(self), repr(self.dump()))
|
||||
else:
|
||||
return '<%s %s %s>' % (type_name(self), id(self), repr(self.dump()))
|
||||
return '<%s %s %s>' % (type_name(self), id(self), repr(self.dump()))
|
||||
|
||||
def __bytes__(self):
|
||||
"""
|
||||
@@ -609,10 +593,7 @@ class Asn1Value(object):
|
||||
elif hasattr(self, 'chosen'):
|
||||
self.chosen.debug(nest_level + 2)
|
||||
else:
|
||||
if _PY2 and isinstance(self.native, byte_cls):
|
||||
print('%s Native: b%s' % (prefix, repr(self.native)))
|
||||
else:
|
||||
print('%s Native: %s' % (prefix, self.native))
|
||||
print('%s Native: %s' % (prefix, self.native))
|
||||
|
||||
def dump(self, force=False):
|
||||
"""
|
||||
@@ -1058,7 +1039,7 @@ class Choice(Asn1Value):
|
||||
A instance of the current class
|
||||
"""
|
||||
|
||||
if not isinstance(encoded_data, byte_cls):
|
||||
if not isinstance(encoded_data, bytes):
|
||||
raise TypeError('encoded_data must be a byte string, not %s' % type_name(encoded_data))
|
||||
|
||||
value, _ = _parse_build(encoded_data, spec=cls, spec_params=kwargs, strict=strict)
|
||||
@@ -1425,17 +1406,11 @@ class Concat(object):
|
||||
|
||||
def __str__(self):
|
||||
"""
|
||||
Since str is different in Python 2 and 3, this calls the appropriate
|
||||
method, __unicode__() or __bytes__()
|
||||
|
||||
:return:
|
||||
A unicode string
|
||||
"""
|
||||
|
||||
if _PY2:
|
||||
return self.__bytes__()
|
||||
else:
|
||||
return self.__unicode__()
|
||||
return self.__unicode__()
|
||||
|
||||
def __bytes__(self):
|
||||
"""
|
||||
@@ -1684,7 +1659,7 @@ class Primitive(Asn1Value):
|
||||
A byte string
|
||||
"""
|
||||
|
||||
if not isinstance(value, byte_cls):
|
||||
if not isinstance(value, bytes):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a byte string, not %s
|
||||
@@ -1784,7 +1759,7 @@ class AbstractString(Constructable, Primitive):
|
||||
A unicode string
|
||||
"""
|
||||
|
||||
if not isinstance(value, str_cls):
|
||||
if not isinstance(value, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a unicode string, not %s
|
||||
@@ -1915,7 +1890,7 @@ class Integer(Primitive, ValueMap):
|
||||
ValueError - when an invalid value is passed
|
||||
"""
|
||||
|
||||
if isinstance(value, str_cls):
|
||||
if isinstance(value, str):
|
||||
if self._map is None:
|
||||
raise ValueError(unwrap(
|
||||
'''
|
||||
@@ -1935,7 +1910,7 @@ class Integer(Primitive, ValueMap):
|
||||
|
||||
value = self._reverse_map[value]
|
||||
|
||||
elif not isinstance(value, int_types):
|
||||
elif not isinstance(value, int):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be an integer or unicode string when a name_map
|
||||
@@ -2004,7 +1979,7 @@ class _IntegerBitString(object):
|
||||
# return an empty chunk, for cases like \x23\x80\x00\x00
|
||||
return []
|
||||
|
||||
unused_bits_len = ord(self.contents[0]) if _PY2 else self.contents[0]
|
||||
unused_bits_len = self.contents[0]
|
||||
value = int_from_bytes(self.contents[1:])
|
||||
bits = (len(self.contents) - 1) * 8
|
||||
|
||||
@@ -2135,7 +2110,7 @@ class BitString(_IntegerBitString, Constructable, Castable, Primitive, ValueMap)
|
||||
if key in value:
|
||||
bits[index] = 1
|
||||
|
||||
value = ''.join(map(str_cls, bits))
|
||||
value = ''.join(map(str, bits))
|
||||
|
||||
elif value.__class__ == tuple:
|
||||
if self._map is None:
|
||||
@@ -2146,7 +2121,7 @@ class BitString(_IntegerBitString, Constructable, Castable, Primitive, ValueMap)
|
||||
if bit:
|
||||
name = self._map.get(index, index)
|
||||
self._native.add(name)
|
||||
value = ''.join(map(str_cls, value))
|
||||
value = ''.join(map(str, value))
|
||||
|
||||
else:
|
||||
raise TypeError(unwrap(
|
||||
@@ -2220,7 +2195,7 @@ class BitString(_IntegerBitString, Constructable, Castable, Primitive, ValueMap)
|
||||
A boolean if the bit is set
|
||||
"""
|
||||
|
||||
is_int = isinstance(key, int_types)
|
||||
is_int = isinstance(key, int)
|
||||
if not is_int:
|
||||
if not isinstance(self._map, dict):
|
||||
raise ValueError(unwrap(
|
||||
@@ -2266,7 +2241,7 @@ class BitString(_IntegerBitString, Constructable, Castable, Primitive, ValueMap)
|
||||
ValueError - when _map is not set or the key name is invalid
|
||||
"""
|
||||
|
||||
is_int = isinstance(key, int_types)
|
||||
is_int = isinstance(key, int)
|
||||
if not is_int:
|
||||
if self._map is None:
|
||||
raise ValueError(unwrap(
|
||||
@@ -2333,8 +2308,8 @@ class BitString(_IntegerBitString, Constructable, Castable, Primitive, ValueMap)
|
||||
if self._map:
|
||||
self._native = set()
|
||||
for index, bit in enumerate(bits):
|
||||
if bit:
|
||||
name = self._map.get(index, index)
|
||||
if bit and index in self._map:
|
||||
name = self._map.get(index)
|
||||
self._native.add(name)
|
||||
else:
|
||||
self._native = bits
|
||||
@@ -2365,7 +2340,7 @@ class OctetBitString(Constructable, Castable, Primitive):
|
||||
ValueError - when an invalid value is passed
|
||||
"""
|
||||
|
||||
if not isinstance(value, byte_cls):
|
||||
if not isinstance(value, bytes):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a byte string, not %s
|
||||
@@ -2435,7 +2410,7 @@ class OctetBitString(Constructable, Castable, Primitive):
|
||||
List with one tuple, consisting of a byte string and an integer (unused bits)
|
||||
"""
|
||||
|
||||
unused_bits_len = ord(self.contents[0]) if _PY2 else self.contents[0]
|
||||
unused_bits_len = self.contents[0]
|
||||
if not unused_bits_len:
|
||||
return [(self.contents[1:], ())]
|
||||
|
||||
@@ -2448,11 +2423,11 @@ class OctetBitString(Constructable, Castable, Primitive):
|
||||
raise ValueError('Bit string has {0} unused bits'.format(unused_bits_len))
|
||||
|
||||
mask = (1 << unused_bits_len) - 1
|
||||
last_byte = ord(self.contents[-1]) if _PY2 else self.contents[-1]
|
||||
last_byte = self.contents[-1]
|
||||
|
||||
# zero out the unused bits in the last byte.
|
||||
zeroed_byte = last_byte & ~mask
|
||||
value = self.contents[1:-1] + (chr(zeroed_byte) if _PY2 else bytes((zeroed_byte,)))
|
||||
value = self.contents[1:-1] + bytes((zeroed_byte,))
|
||||
|
||||
unused_bits = _int_to_bit_tuple(last_byte & mask, unused_bits_len)
|
||||
|
||||
@@ -2505,7 +2480,7 @@ class IntegerBitString(_IntegerBitString, Constructable, Castable, Primitive):
|
||||
ValueError - when an invalid value is passed
|
||||
"""
|
||||
|
||||
if not isinstance(value, int_types):
|
||||
if not isinstance(value, int):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a positive integer, not %s
|
||||
@@ -2570,7 +2545,7 @@ class OctetString(Constructable, Castable, Primitive):
|
||||
A byte string
|
||||
"""
|
||||
|
||||
if not isinstance(value, byte_cls):
|
||||
if not isinstance(value, bytes):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a byte string, not %s
|
||||
@@ -2654,7 +2629,7 @@ class IntegerOctetString(Constructable, Castable, Primitive):
|
||||
ValueError - when an invalid value is passed
|
||||
"""
|
||||
|
||||
if not isinstance(value, int_types):
|
||||
if not isinstance(value, int):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a positive integer, not %s
|
||||
@@ -2752,7 +2727,7 @@ class ParsableOctetString(Constructable, Castable, Primitive):
|
||||
A byte string
|
||||
"""
|
||||
|
||||
if not isinstance(value, byte_cls):
|
||||
if not isinstance(value, bytes):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a byte string, not %s
|
||||
@@ -2904,7 +2879,7 @@ class ParsableOctetBitString(ParsableOctetString):
|
||||
ValueError - when an invalid value is passed
|
||||
"""
|
||||
|
||||
if not isinstance(value, byte_cls):
|
||||
if not isinstance(value, bytes):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a byte string, not %s
|
||||
@@ -2934,7 +2909,7 @@ class ParsableOctetBitString(ParsableOctetString):
|
||||
A byte string
|
||||
"""
|
||||
|
||||
unused_bits_len = ord(self.contents[0]) if _PY2 else self.contents[0]
|
||||
unused_bits_len = self.contents[0]
|
||||
if unused_bits_len:
|
||||
raise ValueError('ParsableOctetBitString should have no unused bits')
|
||||
|
||||
@@ -3007,7 +2982,7 @@ class ObjectIdentifier(Primitive, ValueMap):
|
||||
type_name(cls)
|
||||
))
|
||||
|
||||
if not isinstance(value, str_cls):
|
||||
if not isinstance(value, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
value must be a unicode string, not %s
|
||||
@@ -3045,7 +3020,7 @@ class ObjectIdentifier(Primitive, ValueMap):
|
||||
type_name(cls)
|
||||
))
|
||||
|
||||
if not isinstance(value, str_cls):
|
||||
if not isinstance(value, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
value must be a unicode string, not %s
|
||||
@@ -3079,7 +3054,7 @@ class ObjectIdentifier(Primitive, ValueMap):
|
||||
ValueError - when an invalid value is passed
|
||||
"""
|
||||
|
||||
if not isinstance(value, str_cls):
|
||||
if not isinstance(value, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a unicode string, not %s
|
||||
@@ -3153,24 +3128,22 @@ class ObjectIdentifier(Primitive, ValueMap):
|
||||
|
||||
part = 0
|
||||
for byte in self.contents:
|
||||
if _PY2:
|
||||
byte = ord(byte)
|
||||
part = part * 128
|
||||
part += byte & 127
|
||||
# Last byte in subidentifier has the eighth bit set to 0
|
||||
if byte & 0x80 == 0:
|
||||
if len(output) == 0:
|
||||
if part >= 80:
|
||||
output.append(str_cls(2))
|
||||
output.append(str_cls(part - 80))
|
||||
output.append(str(2))
|
||||
output.append(str(part - 80))
|
||||
elif part >= 40:
|
||||
output.append(str_cls(1))
|
||||
output.append(str_cls(part - 40))
|
||||
output.append(str(1))
|
||||
output.append(str(part - 40))
|
||||
else:
|
||||
output.append(str_cls(0))
|
||||
output.append(str_cls(part))
|
||||
output.append(str(0))
|
||||
output.append(str(part))
|
||||
else:
|
||||
output.append(str_cls(part))
|
||||
output.append(str(part))
|
||||
part = 0
|
||||
|
||||
self._dotted = '.'.join(output)
|
||||
@@ -3240,7 +3213,7 @@ class Enumerated(Integer):
|
||||
ValueError - when an invalid value is passed
|
||||
"""
|
||||
|
||||
if not isinstance(value, int_types) and not isinstance(value, str_cls):
|
||||
if not isinstance(value, int) and not isinstance(value, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be an integer or a unicode string, not %s
|
||||
@@ -3249,7 +3222,7 @@ class Enumerated(Integer):
|
||||
type_name(value)
|
||||
))
|
||||
|
||||
if isinstance(value, str_cls):
|
||||
if isinstance(value, str):
|
||||
if value not in self._reverse_map:
|
||||
raise ValueError(unwrap(
|
||||
'''
|
||||
@@ -3507,7 +3480,7 @@ class Sequence(Asn1Value):
|
||||
if self.children is None:
|
||||
self._parse_children()
|
||||
|
||||
if not isinstance(key, int_types):
|
||||
if not isinstance(key, int):
|
||||
if key not in self._field_map:
|
||||
raise KeyError(unwrap(
|
||||
'''
|
||||
@@ -3554,7 +3527,7 @@ class Sequence(Asn1Value):
|
||||
if self.children is None:
|
||||
self._parse_children()
|
||||
|
||||
if not isinstance(key, int_types):
|
||||
if not isinstance(key, int):
|
||||
if key not in self._field_map:
|
||||
raise KeyError(unwrap(
|
||||
'''
|
||||
@@ -3605,7 +3578,7 @@ class Sequence(Asn1Value):
|
||||
if self.children is None:
|
||||
self._parse_children()
|
||||
|
||||
if not isinstance(key, int_types):
|
||||
if not isinstance(key, int):
|
||||
if key not in self._field_map:
|
||||
raise KeyError(unwrap(
|
||||
'''
|
||||
@@ -4003,7 +3976,7 @@ class Sequence(Asn1Value):
|
||||
encoded using
|
||||
"""
|
||||
|
||||
if not isinstance(field_name, str_cls):
|
||||
if not isinstance(field_name, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
field_name must be a unicode string, not %s
|
||||
@@ -4051,7 +4024,7 @@ class Sequence(Asn1Value):
|
||||
try:
|
||||
name = self._fields[index][0]
|
||||
except (IndexError):
|
||||
name = str_cls(index)
|
||||
name = str(index)
|
||||
self._native[name] = child.native
|
||||
except (ValueError, TypeError) as e:
|
||||
self._native = None
|
||||
@@ -4879,7 +4852,7 @@ class AbstractTime(AbstractString):
|
||||
A dict with the parsed values
|
||||
"""
|
||||
|
||||
string = str_cls(self)
|
||||
string = str(self)
|
||||
|
||||
m = self._TIMESTRING_RE.match(string)
|
||||
if not m:
|
||||
@@ -5018,8 +4991,6 @@ class UTCTime(AbstractTime):
|
||||
raise ValueError('Year of the UTCTime is not in range [1950, 2049], use GeneralizedTime instead')
|
||||
|
||||
value = value.strftime('%y%m%d%H%M%SZ')
|
||||
if _PY2:
|
||||
value = value.decode('ascii')
|
||||
|
||||
AbstractString.set(self, value)
|
||||
# Set it to None and let the class take care of converting the next
|
||||
@@ -5117,8 +5088,6 @@ class GeneralizedTime(AbstractTime):
|
||||
fraction = ''
|
||||
|
||||
value = value.strftime('%Y%m%d%H%M%S') + fraction + 'Z'
|
||||
if _PY2:
|
||||
value = value.decode('ascii')
|
||||
|
||||
AbstractString.set(self, value)
|
||||
# Set it to None and let the class take care of converting the next
|
||||
@@ -5340,7 +5309,7 @@ def _build_id_tuple(params, spec):
|
||||
else:
|
||||
required_class = 2
|
||||
required_tag = params['implicit']
|
||||
if required_class is not None and not isinstance(required_class, int_types):
|
||||
if required_class is not None and not isinstance(required_class, int):
|
||||
required_class = CLASS_NAME_TO_NUM_MAP[required_class]
|
||||
|
||||
required_class = params.get('class_', required_class)
|
||||
|
||||
@@ -20,7 +20,7 @@ import hashlib
|
||||
import math
|
||||
|
||||
from ._errors import unwrap, APIException
|
||||
from ._types import type_name, byte_cls
|
||||
from ._types import type_name
|
||||
from .algos import _ForceNullParameters, DigestAlgorithm, EncryptionAlgorithm, RSAESOAEPParams, RSASSAPSSParams
|
||||
from .core import (
|
||||
Any,
|
||||
@@ -582,7 +582,7 @@ class ECPrivateKey(Sequence):
|
||||
if self._key_size is None:
|
||||
# Infer the key_size from the existing private key if possible
|
||||
pkey_contents = self['private_key'].contents
|
||||
if isinstance(pkey_contents, byte_cls) and len(pkey_contents) > 1:
|
||||
if isinstance(pkey_contents, bytes) and len(pkey_contents) > 1:
|
||||
self.set_key_size(len(self['private_key'].contents))
|
||||
|
||||
elif self._key_size is not None:
|
||||
@@ -744,7 +744,7 @@ class PrivateKeyInfo(Sequence):
|
||||
A PrivateKeyInfo object
|
||||
"""
|
||||
|
||||
if not isinstance(private_key, byte_cls) and not isinstance(private_key, Asn1Value):
|
||||
if not isinstance(private_key, bytes) and not isinstance(private_key, Asn1Value):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
private_key must be a byte string or Asn1Value, not %s
|
||||
@@ -1112,7 +1112,7 @@ class PublicKeyInfo(Sequence):
|
||||
A PublicKeyInfo object
|
||||
"""
|
||||
|
||||
if not isinstance(public_key, byte_cls) and not isinstance(public_key, Asn1Value):
|
||||
if not isinstance(public_key, bytes) and not isinstance(public_key, Asn1Value):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
public_key must be a byte string or Asn1Value, not %s
|
||||
@@ -1268,7 +1268,7 @@ class PublicKeyInfo(Sequence):
|
||||
"""
|
||||
|
||||
if self._sha1 is None:
|
||||
self._sha1 = hashlib.sha1(byte_cls(self['public_key'])).digest()
|
||||
self._sha1 = hashlib.sha1(bytes(self['public_key'])).digest()
|
||||
return self._sha1
|
||||
|
||||
@property
|
||||
@@ -1279,7 +1279,7 @@ class PublicKeyInfo(Sequence):
|
||||
"""
|
||||
|
||||
if self._sha256 is None:
|
||||
self._sha256 = hashlib.sha256(byte_cls(self['public_key'])).digest()
|
||||
self._sha256 = hashlib.sha256(bytes(self['public_key'])).digest()
|
||||
return self._sha256
|
||||
|
||||
@property
|
||||
|
||||
@@ -15,10 +15,9 @@ from __future__ import unicode_literals, division, absolute_import, print_functi
|
||||
|
||||
import sys
|
||||
|
||||
from ._types import byte_cls, chr_cls, type_name
|
||||
from ._types import chr_cls, type_name
|
||||
from .util import int_from_bytes, int_to_bytes
|
||||
|
||||
_PY2 = sys.version_info <= (3,)
|
||||
_INSUFFICIENT_DATA_MESSAGE = 'Insufficient data - %s bytes requested but only %s available'
|
||||
_MAX_DEPTH = 10
|
||||
|
||||
@@ -66,7 +65,7 @@ def emit(class_, method, tag, contents):
|
||||
if tag < 0:
|
||||
raise ValueError('tag must be greater than zero, not %s' % tag)
|
||||
|
||||
if not isinstance(contents, byte_cls):
|
||||
if not isinstance(contents, bytes):
|
||||
raise TypeError('contents must be a byte string, not %s' % type_name(contents))
|
||||
|
||||
return _dump_header(class_, method, tag, contents) + contents
|
||||
@@ -101,7 +100,7 @@ def parse(contents, strict=False):
|
||||
- 5: byte string trailer
|
||||
"""
|
||||
|
||||
if not isinstance(contents, byte_cls):
|
||||
if not isinstance(contents, bytes):
|
||||
raise TypeError('contents must be a byte string, not %s' % type_name(contents))
|
||||
|
||||
contents_len = len(contents)
|
||||
@@ -130,7 +129,7 @@ def peek(contents):
|
||||
An integer with the number of bytes occupied by the ASN.1 value
|
||||
"""
|
||||
|
||||
if not isinstance(contents, byte_cls):
|
||||
if not isinstance(contents, bytes):
|
||||
raise TypeError('contents must be a byte string, not %s' % type_name(contents))
|
||||
|
||||
info, consumed = _parse(contents, len(contents))
|
||||
@@ -171,7 +170,7 @@ def _parse(encoded_data, data_len, pointer=0, lengths_only=False, depth=0):
|
||||
|
||||
if data_len < pointer + 1:
|
||||
raise ValueError(_INSUFFICIENT_DATA_MESSAGE % (1, data_len - pointer))
|
||||
first_octet = ord(encoded_data[pointer]) if _PY2 else encoded_data[pointer]
|
||||
first_octet = encoded_data[pointer]
|
||||
|
||||
pointer += 1
|
||||
|
||||
@@ -183,7 +182,7 @@ def _parse(encoded_data, data_len, pointer=0, lengths_only=False, depth=0):
|
||||
while True:
|
||||
if data_len < pointer + 1:
|
||||
raise ValueError(_INSUFFICIENT_DATA_MESSAGE % (1, data_len - pointer))
|
||||
num = ord(encoded_data[pointer]) if _PY2 else encoded_data[pointer]
|
||||
num = encoded_data[pointer]
|
||||
pointer += 1
|
||||
if num == 0x80 and tag == 0:
|
||||
raise ValueError('Non-minimal tag encoding')
|
||||
@@ -196,7 +195,7 @@ def _parse(encoded_data, data_len, pointer=0, lengths_only=False, depth=0):
|
||||
|
||||
if data_len < pointer + 1:
|
||||
raise ValueError(_INSUFFICIENT_DATA_MESSAGE % (1, data_len - pointer))
|
||||
length_octet = ord(encoded_data[pointer]) if _PY2 else encoded_data[pointer]
|
||||
length_octet = encoded_data[pointer]
|
||||
pointer += 1
|
||||
trailer = b''
|
||||
|
||||
|
||||
@@ -11,17 +11,13 @@ Encoding DER to PEM and decoding PEM to DER. Exports the following items:
|
||||
|
||||
from __future__ import unicode_literals, division, absolute_import, print_function
|
||||
|
||||
from io import BytesIO
|
||||
import base64
|
||||
import re
|
||||
import sys
|
||||
|
||||
from ._errors import unwrap
|
||||
from ._types import type_name as _type_name, str_cls, byte_cls
|
||||
from ._types import type_name as _type_name
|
||||
|
||||
if sys.version_info < (3,):
|
||||
from cStringIO import StringIO as BytesIO
|
||||
else:
|
||||
from io import BytesIO
|
||||
|
||||
|
||||
def detect(byte_string):
|
||||
@@ -36,7 +32,7 @@ def detect(byte_string):
|
||||
string
|
||||
"""
|
||||
|
||||
if not isinstance(byte_string, byte_cls):
|
||||
if not isinstance(byte_string, bytes):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
byte_string must be a byte string, not %s
|
||||
@@ -67,14 +63,14 @@ def armor(type_name, der_bytes, headers=None):
|
||||
A byte string of the PEM block
|
||||
"""
|
||||
|
||||
if not isinstance(der_bytes, byte_cls):
|
||||
if not isinstance(der_bytes, bytes):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
der_bytes must be a byte string, not %s
|
||||
''' % _type_name(der_bytes)
|
||||
))
|
||||
|
||||
if not isinstance(type_name, str_cls):
|
||||
if not isinstance(type_name, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
type_name must be a unicode string, not %s
|
||||
@@ -127,7 +123,7 @@ def _unarmor(pem_bytes):
|
||||
in the form "Name: Value" that are right after the begin line.
|
||||
"""
|
||||
|
||||
if not isinstance(pem_bytes, byte_cls):
|
||||
if not isinstance(pem_bytes, bytes):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
pem_bytes must be a byte string, not %s
|
||||
|
||||
@@ -20,11 +20,11 @@ from __future__ import unicode_literals, division, absolute_import, print_functi
|
||||
|
||||
import math
|
||||
import sys
|
||||
from datetime import datetime, date, timedelta, tzinfo
|
||||
from collections import OrderedDict
|
||||
from datetime import datetime, date, timedelta, timezone, tzinfo
|
||||
|
||||
from ._errors import unwrap
|
||||
from ._iri import iri_to_uri, uri_to_iri # noqa
|
||||
from ._ordereddict import OrderedDict # noqa
|
||||
from ._types import type_name
|
||||
|
||||
if sys.platform == 'win32':
|
||||
@@ -33,230 +33,53 @@ else:
|
||||
from socket import inet_ntop, inet_pton # noqa
|
||||
|
||||
|
||||
# Python 2
|
||||
if sys.version_info <= (3,):
|
||||
|
||||
def int_to_bytes(value, signed=False, width=None):
|
||||
"""
|
||||
Converts an integer to a byte string
|
||||
def int_to_bytes(value, signed=False, width=None):
|
||||
"""
|
||||
Converts an integer to a byte string
|
||||
|
||||
:param value:
|
||||
The integer to convert
|
||||
:param value:
|
||||
The integer to convert
|
||||
|
||||
:param signed:
|
||||
If the byte string should be encoded using two's complement
|
||||
:param signed:
|
||||
If the byte string should be encoded using two's complement
|
||||
|
||||
:param width:
|
||||
If None, the minimal possible size (but at least 1),
|
||||
otherwise an integer of the byte width for the return value
|
||||
:param width:
|
||||
If None, the minimal possible size (but at least 1),
|
||||
otherwise an integer of the byte width for the return value
|
||||
|
||||
:return:
|
||||
A byte string
|
||||
"""
|
||||
:return:
|
||||
A byte string
|
||||
"""
|
||||
|
||||
if value == 0 and width == 0:
|
||||
return b''
|
||||
|
||||
# Handle negatives in two's complement
|
||||
is_neg = False
|
||||
if signed and value < 0:
|
||||
is_neg = True
|
||||
bits = int(math.ceil(len('%x' % abs(value)) / 2.0) * 8)
|
||||
value = (value + (1 << bits)) % (1 << bits)
|
||||
|
||||
hex_str = '%x' % value
|
||||
if len(hex_str) & 1:
|
||||
hex_str = '0' + hex_str
|
||||
|
||||
output = hex_str.decode('hex')
|
||||
|
||||
if signed and not is_neg and ord(output[0:1]) & 0x80:
|
||||
output = b'\x00' + output
|
||||
|
||||
if width is not None:
|
||||
if len(output) > width:
|
||||
raise OverflowError('int too big to convert')
|
||||
if is_neg:
|
||||
pad_char = b'\xFF'
|
||||
else:
|
||||
pad_char = b'\x00'
|
||||
output = (pad_char * (width - len(output))) + output
|
||||
elif is_neg and ord(output[0:1]) & 0x80 == 0:
|
||||
output = b'\xFF' + output
|
||||
|
||||
return output
|
||||
|
||||
def int_from_bytes(value, signed=False):
|
||||
"""
|
||||
Converts a byte string to an integer
|
||||
|
||||
:param value:
|
||||
The byte string to convert
|
||||
|
||||
:param signed:
|
||||
If the byte string should be interpreted using two's complement
|
||||
|
||||
:return:
|
||||
An integer
|
||||
"""
|
||||
|
||||
if value == b'':
|
||||
return 0
|
||||
|
||||
num = long(value.encode("hex"), 16) # noqa
|
||||
|
||||
if not signed:
|
||||
return num
|
||||
|
||||
# Check for sign bit and handle two's complement
|
||||
if ord(value[0:1]) & 0x80:
|
||||
bit_len = len(value) * 8
|
||||
return num - (1 << bit_len)
|
||||
|
||||
return num
|
||||
|
||||
class timezone(tzinfo): # noqa
|
||||
"""
|
||||
Implements datetime.timezone for py2.
|
||||
Only full minute offsets are supported.
|
||||
DST is not supported.
|
||||
"""
|
||||
|
||||
def __init__(self, offset, name=None):
|
||||
"""
|
||||
:param offset:
|
||||
A timedelta with this timezone's offset from UTC
|
||||
|
||||
:param name:
|
||||
Name of the timezone; if None, generate one.
|
||||
"""
|
||||
|
||||
if not timedelta(hours=-24) < offset < timedelta(hours=24):
|
||||
raise ValueError('Offset must be in [-23:59, 23:59]')
|
||||
|
||||
if offset.seconds % 60 or offset.microseconds:
|
||||
raise ValueError('Offset must be full minutes')
|
||||
|
||||
self._offset = offset
|
||||
|
||||
if name is not None:
|
||||
self._name = name
|
||||
elif not offset:
|
||||
self._name = 'UTC'
|
||||
else:
|
||||
self._name = 'UTC' + _format_offset(offset)
|
||||
|
||||
def __eq__(self, other):
|
||||
"""
|
||||
Compare two timezones
|
||||
|
||||
:param other:
|
||||
The other timezone to compare to
|
||||
|
||||
:return:
|
||||
A boolean
|
||||
"""
|
||||
|
||||
if type(other) != timezone:
|
||||
return False
|
||||
return self._offset == other._offset
|
||||
|
||||
def __getinitargs__(self):
|
||||
"""
|
||||
Called by tzinfo.__reduce__ to support pickle and copy.
|
||||
|
||||
:return:
|
||||
offset and name, to be used for __init__
|
||||
"""
|
||||
|
||||
return self._offset, self._name
|
||||
|
||||
def tzname(self, dt):
|
||||
"""
|
||||
:param dt:
|
||||
A datetime object; ignored.
|
||||
|
||||
:return:
|
||||
Name of this timezone
|
||||
"""
|
||||
|
||||
return self._name
|
||||
|
||||
def utcoffset(self, dt):
|
||||
"""
|
||||
:param dt:
|
||||
A datetime object; ignored.
|
||||
|
||||
:return:
|
||||
A timedelta object with the offset from UTC
|
||||
"""
|
||||
|
||||
return self._offset
|
||||
|
||||
def dst(self, dt):
|
||||
"""
|
||||
:param dt:
|
||||
A datetime object; ignored.
|
||||
|
||||
:return:
|
||||
Zero timedelta
|
||||
"""
|
||||
|
||||
return timedelta(0)
|
||||
|
||||
timezone.utc = timezone(timedelta(0))
|
||||
|
||||
# Python 3
|
||||
else:
|
||||
|
||||
from datetime import timezone # noqa
|
||||
|
||||
def int_to_bytes(value, signed=False, width=None):
|
||||
"""
|
||||
Converts an integer to a byte string
|
||||
|
||||
:param value:
|
||||
The integer to convert
|
||||
|
||||
:param signed:
|
||||
If the byte string should be encoded using two's complement
|
||||
|
||||
:param width:
|
||||
If None, the minimal possible size (but at least 1),
|
||||
otherwise an integer of the byte width for the return value
|
||||
|
||||
:return:
|
||||
A byte string
|
||||
"""
|
||||
|
||||
if width is None:
|
||||
if signed:
|
||||
if value < 0:
|
||||
bits_required = abs(value + 1).bit_length()
|
||||
else:
|
||||
bits_required = value.bit_length()
|
||||
if bits_required % 8 == 0:
|
||||
bits_required += 1
|
||||
if width is None:
|
||||
if signed:
|
||||
if value < 0:
|
||||
bits_required = abs(value + 1).bit_length()
|
||||
else:
|
||||
bits_required = value.bit_length()
|
||||
width = math.ceil(bits_required / 8) or 1
|
||||
return value.to_bytes(width, byteorder='big', signed=signed)
|
||||
if bits_required % 8 == 0:
|
||||
bits_required += 1
|
||||
else:
|
||||
bits_required = value.bit_length()
|
||||
width = math.ceil(bits_required / 8) or 1
|
||||
return value.to_bytes(width, byteorder='big', signed=signed)
|
||||
|
||||
def int_from_bytes(value, signed=False):
|
||||
"""
|
||||
Converts a byte string to an integer
|
||||
def int_from_bytes(value, signed=False):
|
||||
"""
|
||||
Converts a byte string to an integer
|
||||
|
||||
:param value:
|
||||
The byte string to convert
|
||||
:param value:
|
||||
The byte string to convert
|
||||
|
||||
:param signed:
|
||||
If the byte string should be interpreted using two's complement
|
||||
:param signed:
|
||||
If the byte string should be interpreted using two's complement
|
||||
|
||||
:return:
|
||||
An integer
|
||||
"""
|
||||
:return:
|
||||
An integer
|
||||
"""
|
||||
|
||||
return int.from_bytes(value, 'big', signed=signed)
|
||||
return int.from_bytes(value, 'big', signed=signed)
|
||||
|
||||
|
||||
def _format_offset(off):
|
||||
|
||||
@@ -15,6 +15,7 @@ Other type classes are defined that help compose the types listed above.
|
||||
|
||||
from __future__ import unicode_literals, division, absolute_import, print_function
|
||||
|
||||
from collections import OrderedDict
|
||||
from contextlib import contextmanager
|
||||
from encodings import idna # noqa
|
||||
import hashlib
|
||||
@@ -26,8 +27,7 @@ import unicodedata
|
||||
|
||||
from ._errors import unwrap
|
||||
from ._iri import iri_to_uri, uri_to_iri
|
||||
from ._ordereddict import OrderedDict
|
||||
from ._types import type_name, str_cls, bytes_to_list
|
||||
from ._types import type_name
|
||||
from .algos import AlgorithmIdentifier, AnyAlgorithmIdentifier, DigestAlgorithm, SignedDigestAlgorithm
|
||||
from .core import (
|
||||
Any,
|
||||
@@ -100,7 +100,7 @@ class DNSName(IA5String):
|
||||
A unicode string
|
||||
"""
|
||||
|
||||
if not isinstance(value, str_cls):
|
||||
if not isinstance(value, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a unicode string, not %s
|
||||
@@ -131,7 +131,7 @@ class URI(IA5String):
|
||||
A unicode string
|
||||
"""
|
||||
|
||||
if not isinstance(value, str_cls):
|
||||
if not isinstance(value, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a unicode string, not %s
|
||||
@@ -215,7 +215,7 @@ class EmailAddress(IA5String):
|
||||
A unicode string
|
||||
"""
|
||||
|
||||
if not isinstance(value, str_cls):
|
||||
if not isinstance(value, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a unicode string, not %s
|
||||
@@ -323,7 +323,7 @@ class IPAddress(OctetString):
|
||||
an IPv6 address or IPv6 address with CIDR
|
||||
"""
|
||||
|
||||
if not isinstance(value, str_cls):
|
||||
if not isinstance(value, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
%s value must be a unicode string, not %s
|
||||
@@ -413,7 +413,7 @@ class IPAddress(OctetString):
|
||||
if cidr_int is not None:
|
||||
cidr_bits = '{0:b}'.format(cidr_int)
|
||||
cidr = len(cidr_bits.rstrip('0'))
|
||||
value = value + '/' + str_cls(cidr)
|
||||
value = value + '/' + str(cidr)
|
||||
self._native = value
|
||||
return self._native
|
||||
|
||||
@@ -2598,7 +2598,7 @@ class Certificate(Sequence):
|
||||
"""
|
||||
|
||||
if self._issuer_serial is None:
|
||||
self._issuer_serial = self.issuer.sha256 + b':' + str_cls(self.serial_number).encode('ascii')
|
||||
self._issuer_serial = self.issuer.sha256 + b':' + str(self.serial_number).encode('ascii')
|
||||
return self._issuer_serial
|
||||
|
||||
@property
|
||||
@@ -2647,7 +2647,7 @@ class Certificate(Sequence):
|
||||
# We untag the element since it is tagged via being a choice from GeneralName
|
||||
issuer = issuer.untag()
|
||||
authority_serial = self.authority_key_identifier_value['authority_cert_serial_number'].native
|
||||
self._authority_issuer_serial = issuer.sha256 + b':' + str_cls(authority_serial).encode('ascii')
|
||||
self._authority_issuer_serial = issuer.sha256 + b':' + str(authority_serial).encode('ascii')
|
||||
else:
|
||||
self._authority_issuer_serial = None
|
||||
return self._authority_issuer_serial
|
||||
@@ -2860,7 +2860,7 @@ class Certificate(Sequence):
|
||||
with a space between each pair of characters, all uppercase
|
||||
"""
|
||||
|
||||
return ' '.join('%02X' % c for c in bytes_to_list(self.sha1))
|
||||
return ' '.join('%02X' % c for c in list(self.sha1))
|
||||
|
||||
@property
|
||||
def sha256(self):
|
||||
@@ -2882,7 +2882,7 @@ class Certificate(Sequence):
|
||||
with a space between each pair of characters, all uppercase
|
||||
"""
|
||||
|
||||
return ' '.join('%02X' % c for c in bytes_to_list(self.sha256))
|
||||
return ' '.join('%02X' % c for c in list(self.sha256))
|
||||
|
||||
def is_valid_domain_ip(self, domain_ip):
|
||||
"""
|
||||
@@ -2896,7 +2896,7 @@ class Certificate(Sequence):
|
||||
A boolean - if the domain or IP is valid for the certificate
|
||||
"""
|
||||
|
||||
if not isinstance(domain_ip, str_cls):
|
||||
if not isinstance(domain_ip, str):
|
||||
raise TypeError(unwrap(
|
||||
'''
|
||||
domain_ip must be a unicode string, not %s
|
||||
|
||||
@@ -138,7 +138,7 @@ import jc.utils
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.1'
|
||||
version = '1.2'
|
||||
description = '`certbot` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -201,7 +201,7 @@ def parse(
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
cert_pattern = re.compile(r'^Found the following certs:$', re.MULTILINE)
|
||||
cert_pattern = re.compile(r'^Found the following certs:\r?$', re.MULTILINE)
|
||||
|
||||
if re.search(cert_pattern, data):
|
||||
cmd_option = 'certificates'
|
||||
|
||||
@@ -198,7 +198,7 @@ def _process(proc_data):
|
||||
|
||||
Dictionary. Structured data to conform to the schema.
|
||||
"""
|
||||
# put itmes in lists
|
||||
# put items in lists
|
||||
try:
|
||||
for entry in proc_data['schedule']:
|
||||
entry['minute'] = entry['minute'].split(',')
|
||||
|
||||
@@ -194,7 +194,7 @@ def _process(proc_data):
|
||||
|
||||
Dictionary. Structured data to conform to the schema.
|
||||
"""
|
||||
# put itmes in lists
|
||||
# put items in lists
|
||||
try:
|
||||
for entry in proc_data['schedule']:
|
||||
entry['minute'] = entry['minute'].split(',')
|
||||
|
||||
149
jc/parsers/debconf_show.py
Normal file
149
jc/parsers/debconf_show.py
Normal file
@@ -0,0 +1,149 @@
|
||||
"""jc - JSON Convert `debconf-show` command output parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ debconf-show onlyoffice-documentserver | jc --debconf-show
|
||||
|
||||
or
|
||||
|
||||
$ jc debconf-show onlyoffice-documentserver
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('debconf_show', debconf_show_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"asked": boolean,
|
||||
"packagename": string,
|
||||
"name": string,
|
||||
"value": string
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ debconf-show onlyoffice-documentserver | jc --debconf-show -p
|
||||
[
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "jwt_secret",
|
||||
"value": "aL8ei2iereuzee7cuJ6Cahjah1ixee2ah"
|
||||
},
|
||||
{
|
||||
"asked": false,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "db_pwd",
|
||||
"value": "(password omitted)"
|
||||
},
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "rabbitmq_pwd",
|
||||
"value": "(password omitted)"
|
||||
},
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "db_port",
|
||||
"value": "5432"
|
||||
},
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "db_user",
|
||||
"value": "onlyoffice"
|
||||
},
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "rabbitmq_proto",
|
||||
"value": "amqp"
|
||||
},
|
||||
{
|
||||
"asked": true,
|
||||
"packagename": "onlyoffice",
|
||||
"name": "cluster_mode",
|
||||
"value": "false"
|
||||
}
|
||||
]
|
||||
"""
|
||||
from typing import List, Dict
|
||||
from jc.jc_types import JSONDictType
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.0'
|
||||
description = '`debconf-show` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
compatible = ['linux']
|
||||
tags = ['command']
|
||||
magic_commands = ['debconf-show']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def _process(proc_data: JSONDictType) -> List[JSONDictType]:
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (Dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Structured to conform to the schema.
|
||||
"""
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(
|
||||
data: str,
|
||||
raw: bool = False,
|
||||
quiet: bool = False
|
||||
) -> List[JSONDictType]:
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Raw or processed structured data.
|
||||
"""
|
||||
jc.utils.compatibility(__name__, info.compatible, quiet)
|
||||
jc.utils.input_type_check(data)
|
||||
|
||||
raw_output: List = []
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
for line in filter(None, data.splitlines()):
|
||||
output_line: Dict = {}
|
||||
splitline = line.split(':', maxsplit=1)
|
||||
|
||||
output_line['asked'] = splitline[0].startswith('*')
|
||||
packagename, key = splitline[0].split('/', maxsplit=1)
|
||||
packagename = packagename[2:]
|
||||
key = key.replace('-', '_')
|
||||
val = splitline[1].strip()
|
||||
output_line['packagename'] = packagename
|
||||
output_line['name'] = key
|
||||
output_line['value'] = val
|
||||
|
||||
raw_output.append(output_line)
|
||||
|
||||
return raw_output if raw else _process(raw_output)
|
||||
@@ -67,12 +67,13 @@ Examples:
|
||||
"_": "/usr/bin/env"
|
||||
}
|
||||
"""
|
||||
import re
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.4'
|
||||
version = '1.5'
|
||||
description = '`env` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -83,6 +84,7 @@ class info():
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
VAR_DEF_PATTERN = re.compile(r'^[a-zA-Z_][a-zA-Z0-9_]*=\S*.*$')
|
||||
|
||||
def _process(proc_data):
|
||||
"""
|
||||
@@ -96,8 +98,6 @@ def _process(proc_data):
|
||||
|
||||
List of Dictionaries. Structured data to conform to the schema.
|
||||
"""
|
||||
|
||||
# rebuild output for added semantic information
|
||||
processed = []
|
||||
for k, v in proc_data.items():
|
||||
proc_line = {}
|
||||
@@ -120,24 +120,29 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary of raw structured data or
|
||||
List of Dictionaries of processed structured data
|
||||
Dictionary of raw structured data or (default)
|
||||
List of Dictionaries of processed structured data (raw)
|
||||
"""
|
||||
jc.utils.compatibility(__name__, info.compatible, quiet)
|
||||
jc.utils.input_type_check(data)
|
||||
|
||||
raw_output = {}
|
||||
|
||||
# Clear any blank lines
|
||||
cleandata = list(filter(None, data.splitlines()))
|
||||
key = ''
|
||||
value = None
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
for line in data.splitlines():
|
||||
if VAR_DEF_PATTERN.match(line):
|
||||
if not value is None:
|
||||
raw_output[key] = value
|
||||
key, value = line.split('=', maxsplit=1)
|
||||
continue
|
||||
|
||||
for entry in cleandata:
|
||||
parsed_line = entry.split('=', maxsplit=1)
|
||||
raw_output[parsed_line[0]] = parsed_line[1]
|
||||
if not value is None:
|
||||
value = value + '\n' + line
|
||||
|
||||
if not value is None:
|
||||
raw_output[key] = value
|
||||
|
||||
return raw_output if raw else _process(raw_output)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return _process(raw_output)
|
||||
|
||||
137
jc/parsers/find.py
Normal file
137
jc/parsers/find.py
Normal file
@@ -0,0 +1,137 @@
|
||||
"""jc - JSON Convert `find` command output parser
|
||||
|
||||
This parser returns a list of objects by default and a list of strings if
|
||||
the `--raw` option is used.
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ find | jc --find
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('find', find_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"path": string,
|
||||
"node": string,
|
||||
"error": string
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ find | jc --find -p
|
||||
[
|
||||
{
|
||||
"path": "./directory"
|
||||
"node": "filename"
|
||||
},
|
||||
{
|
||||
"path": "./anotherdirectory"
|
||||
"node": "anotherfile"
|
||||
},
|
||||
{
|
||||
"path": null
|
||||
"node": null
|
||||
"error": "find: './inaccessible': Permission denied"
|
||||
}
|
||||
...
|
||||
]
|
||||
|
||||
$ find | jc --find -p -r
|
||||
[
|
||||
"./templates/readme_template",
|
||||
"./templates/manpage_template",
|
||||
"./.github/workflows/pythonapp.yml",
|
||||
...
|
||||
]
|
||||
"""
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.0'
|
||||
description = '`find` command parser'
|
||||
author = 'Solomon Leang'
|
||||
author_email = 'solomonleang@gmail.com'
|
||||
compatible = ['linux']
|
||||
tags = ['command']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def _process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (List of Strings) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Structured data to conform to the schema.
|
||||
"""
|
||||
processed = []
|
||||
|
||||
for index in proc_data:
|
||||
path, node, error = "", "", ""
|
||||
|
||||
if index == ".":
|
||||
node = "."
|
||||
elif index.startswith('find: '):
|
||||
error = index
|
||||
else:
|
||||
try:
|
||||
path, node = index.rsplit('/', maxsplit=1)
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
proc_line = {
|
||||
'path': path if path else None,
|
||||
'node': node if node else None
|
||||
}
|
||||
|
||||
if error:
|
||||
proc_line.update(
|
||||
{'error': error}
|
||||
)
|
||||
|
||||
processed.append(proc_line)
|
||||
|
||||
return processed
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of raw strings or
|
||||
List of Dictionaries of processed structured data
|
||||
"""
|
||||
jc.utils.compatibility(__name__, info.compatible, quiet)
|
||||
jc.utils.input_type_check(data)
|
||||
|
||||
raw_output = []
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
raw_output = data.splitlines()
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return _process(raw_output)
|
||||
243
jc/parsers/host.py
Normal file
243
jc/parsers/host.py
Normal file
@@ -0,0 +1,243 @@
|
||||
"""jc - JSON Convert `host` command output parser
|
||||
|
||||
Supports parsing of the most commonly used RR types (A, AAAA, MX, TXT)
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ host google.com | jc --host
|
||||
|
||||
or
|
||||
|
||||
$ jc host google.com
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('host', host_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"hostname": string,
|
||||
"address": [
|
||||
string
|
||||
],
|
||||
"v6-address": [
|
||||
string
|
||||
],
|
||||
"mail": [
|
||||
string
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
[
|
||||
{
|
||||
"nameserver": string,
|
||||
"zone": string,
|
||||
"mname": string,
|
||||
"rname": string,
|
||||
"serial": integer,
|
||||
"refresh": integer,
|
||||
"retry": integer,
|
||||
"expire": integer,
|
||||
"minimum": integer
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ host google.com | jc --host
|
||||
[
|
||||
{
|
||||
"hostname": "google.com",
|
||||
"address": [
|
||||
"142.251.39.110"
|
||||
],
|
||||
"v6-address": [
|
||||
"2a00:1450:400e:811::200e"
|
||||
],
|
||||
"mail": [
|
||||
"smtp.google.com."
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
$ jc host -C sunet.se
|
||||
[
|
||||
{
|
||||
"nameserver": "2001:6b0:7::2",
|
||||
"zone": "sunet.se",
|
||||
"mname": "sunic.sunet.se.",
|
||||
"rname": "hostmaster.sunet.se.",
|
||||
"serial": "2023090401",
|
||||
"refresh": "28800",
|
||||
"retry": "7200",
|
||||
"expire": "604800",
|
||||
"minimum": "300"
|
||||
},
|
||||
{
|
||||
...
|
||||
}
|
||||
]
|
||||
"""
|
||||
from typing import Dict, List
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = '`host` command parser'
    author = 'Pettai'
    author_email = 'pettai@sunet.se'
    # platforms this parser has been verified against
    compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
    tags = ['command']
    # enables the `jc host ...` magic-command syntax
    magic_commands = ['host']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def _process(proc_data):
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data: (List of Dictionaries) raw structured data to process

    Returns:

        List of Dictionaries. Structured to conform to the schema.
    """
    # SOA fields that must be converted from string to integer
    int_fields = ('serial', 'refresh', 'retry', 'expire', 'minimum')

    for item in proc_data:
        for field in int_fields:
            if field in item:
                item[field] = jc.utils.convert_to_int(item[field])

    return proc_data
|
||||
|
||||
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False):
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of Dictionaries. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output: List[Dict] = []

    # only emit the "Unknown format" warning once per parse
    warned = False

    if jc.utils.has_data(data):

        addresses = []
        v6addresses = []
        mail = []
        text = []
        rrdata = {}
        soaparse = False

        for line in filter(None, data.splitlines()):
            line = line.strip()

            # default output: A records
            # Example: "google.com has address 142.251.39.110"
            if ' has address ' in line:
                linedata = line.split(' ', maxsplit=3)
                hostname = linedata[0]
                address = linedata[3]
                addresses.append(address)
                rrdata.update({'hostname': hostname})
                rrdata.update({'address': addresses})
                continue

            # AAAA records
            # Example: "google.com has IPv6 address 2a00:1450:400e:811::200e"
            if ' has IPv6 address ' in line:
                linedata = line.split(' ', maxsplit=4)
                hostname = linedata[0]
                v6address = linedata[4]
                v6addresses.append(v6address)
                rrdata.update({'hostname': hostname})
                rrdata.update({'v6-address': v6addresses})
                continue

            # MX records
            # Example: "google.com mail is handled by 10 smtp.google.com."
            if ' mail is handled by ' in line:
                linedata = line.split(' ', maxsplit=6)
                hostname = linedata[0]
                mx = linedata[6]
                mail.append(mx)
                rrdata.update({'hostname': hostname})
                rrdata.update({'mail': mail})
                continue

            # TXT parsing
            # Example: 'example.com descriptive text "v=spf1 -all"'
            if ' descriptive text ' in line:
                linedata = line.split('descriptive text "', maxsplit=1)
                # fix: the split leaves a trailing space on the hostname
                hostname = linedata[0].strip()
                txt = linedata[1].strip('"')
                text.append(txt)
                rrdata.update({'hostname': hostname})
                rrdata.update({'text': text})
                continue

            # -C / SOA parsing
            # Example: "Nameserver 2001:6b0:7::2:"
            if line.startswith('Nameserver '):
                soaparse = True
                # each nameserver starts a new record
                rrdata = {}
                linedata = line.split(' ', maxsplit=1)
                nameserverip = linedata[1].rstrip(':')
                rrdata.update({'nameserver': nameserverip})
                continue

            # Example: "sunet.se has SOA record sunic.sunet.se. hostmaster... 2023090401 28800 7200 604800 300"
            if ' has SOA record ' in line:
                # fix: index inside the try block so that a malformed SOA
                # line raises IndexError here and is reported as a warning
                # instead of crashing the parser
                try:
                    linedata = line.split(' ', maxsplit=10)

                    zone = linedata[0]
                    mname = linedata[4]
                    rname = linedata[5]
                    serial = linedata[6]
                    refresh = linedata[7]
                    retry = linedata[8]
                    expire = linedata[9]
                    minimum = linedata[10]

                    rrdata.update(
                        {
                            'zone': zone,
                            'mname': mname,
                            'rname': rname,
                            'serial': serial,
                            'refresh': refresh,
                            'retry': retry,
                            'expire': expire,
                            'minimum': minimum
                        },
                    )
                    raw_output.append(rrdata)

                except IndexError:
                    if not warned:
                        jc.utils.warning_message(['Unknown format detected.'])
                        warned = True

        # non-SOA output accumulates into a single record appended at the end
        if not soaparse:
            raw_output.append(rrdata)

    return raw_output if raw else _process(raw_output)
|
||||
689
jc/parsers/iftop.py
Normal file
689
jc/parsers/iftop.py
Normal file
@@ -0,0 +1,689 @@
|
||||
"""jc - JSON Convert `iftop` command output parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ iftop -i <device> -t -B -s1 | jc --iftop
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('iftop', iftop_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"device": string,
|
||||
"ip_address": string,
|
||||
"mac_address": string,
|
||||
"clients": [
|
||||
{
|
||||
"index": integer,
|
||||
"connections": [
|
||||
{
|
||||
"host_name": string,
|
||||
"host_port": string, # can be service or missing
|
||||
"last_2s": integer,
|
||||
"last_10s": integer,
|
||||
"last_40s": integer,
|
||||
"cumulative": integer,
|
||||
"direction": string
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
"total_send_rate": {
|
||||
"last_2s": integer,
|
||||
"last_10s": integer,
|
||||
"last_40s": integer
|
||||
}
|
||||
"total_receive_rate": {
|
||||
"last_2s": integer,
|
||||
"last_10s": integer,
|
||||
"last_40s": integer
|
||||
}
|
||||
"total_send_and_receive_rate": {
|
||||
"last_2s": integer,
|
||||
"last_10s": integer,
|
||||
"last_40s": integer
|
||||
}
|
||||
"peak_rate": {
|
||||
"last_2s": integer,
|
||||
"last_10s": integer,
|
||||
"last_40s": integer
|
||||
}
|
||||
"cumulative_rate": {
|
||||
"last_2s": integer,
|
||||
"last_10s": integer,
|
||||
"last_40s": integer
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ iftop -i enp0s3 -t -P -s1 | jc --iftop -p
|
||||
[
|
||||
{
|
||||
"device": "enp0s3",
|
||||
"ip_address": "10.10.15.129",
|
||||
"mac_address": "08:00:27:c0:4a:4f",
|
||||
"clients": [
|
||||
{
|
||||
"index": 1,
|
||||
"connections": [
|
||||
{
|
||||
"host_name": "ubuntu-2004-clean-01",
|
||||
"host_port": "ssh",
|
||||
"last_2s": 448,
|
||||
"last_10s": 448,
|
||||
"last_40s": 448,
|
||||
"cumulative": 112,
|
||||
"direction": "send"
|
||||
},
|
||||
{
|
||||
"host_name": "10.10.15.72",
|
||||
"host_port": "40876",
|
||||
"last_2s": 208,
|
||||
"last_10s": 208,
|
||||
"last_40s": 208,
|
||||
"cumulative": 52,
|
||||
"direction": "receive"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"total_send_rate": {
|
||||
"last_2s": 448,
|
||||
"last_10s": 448,
|
||||
"last_40s": 448
|
||||
},
|
||||
"total_receive_rate": {
|
||||
"last_2s": 208,
|
||||
"last_10s": 208,
|
||||
"last_40s": 208
|
||||
},
|
||||
"total_send_and_receive_rate": {
|
||||
"last_2s": 656,
|
||||
"last_10s": 656,
|
||||
"last_40s": 656
|
||||
},
|
||||
"peak_rate": {
|
||||
"last_2s": 448,
|
||||
"last_10s": 208,
|
||||
"last_40s": 656
|
||||
},
|
||||
"cumulative_rate": {
|
||||
"last_2s": 112,
|
||||
"last_10s": 52,
|
||||
"last_40s": 164
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
$ iftop -i enp0s3 -t -P -s1 | jc --iftop -p -r
|
||||
[
|
||||
{
|
||||
"device": "enp0s3",
|
||||
"ip_address": "10.10.15.129",
|
||||
"mac_address": "11:22:33:44:55:66",
|
||||
"clients": [
|
||||
{
|
||||
"index": 1,
|
||||
"connections": [
|
||||
{
|
||||
"host_name": "ubuntu-2004-clean-01",
|
||||
"host_port": "ssh",
|
||||
"last_2s": "448b",
|
||||
"last_10s": "448b",
|
||||
"last_40s": "448b",
|
||||
"cumulative": "112B",
|
||||
"direction": "send"
|
||||
},
|
||||
{
|
||||
"host_name": "10.10.15.72",
|
||||
"host_port": "40876",
|
||||
"last_2s": "208b",
|
||||
"last_10s": "208b",
|
||||
"last_40s": "208b",
|
||||
"cumulative": "52B",
|
||||
"direction": "receive"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"total_send_rate": {
|
||||
"last_2s": "448b",
|
||||
"last_10s": "448b",
|
||||
"last_40s": "448b"
|
||||
},
|
||||
"total_receive_rate": {
|
||||
"last_2s": "208b",
|
||||
"last_10s": "208b",
|
||||
"last_40s": "208b"
|
||||
},
|
||||
"total_send_and_receive_rate": {
|
||||
"last_2s": "656b",
|
||||
"last_10s": "656b",
|
||||
"last_40s": "656b"
|
||||
},
|
||||
"peak_rate": {
|
||||
"last_2s": "448b",
|
||||
"last_10s": "208b",
|
||||
"last_40s": "656b"
|
||||
},
|
||||
"cumulative_rate": {
|
||||
"last_2s": "112B",
|
||||
"last_10s": "52B",
|
||||
"last_40s": "164B"
|
||||
}
|
||||
}
|
||||
]
|
||||
"""
|
||||
import re
|
||||
from typing import List, Dict
|
||||
from jc.jc_types import JSONDictType
|
||||
import jc.utils
|
||||
from collections import namedtuple
|
||||
from numbers import Number
|
||||
|
||||
|
||||
class info:
    """Provides parser metadata (version, author, etc.)"""
    version = "1.0"
    description = "`iftop` command parser"
    author = "Ron Green"
    author_email = "11993626+georgettica@users.noreply.github.com"
    # platforms this parser has been verified against
    compatible = ["linux"]
    tags = ["command"]
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def _process(proc_data: List[JSONDictType], quiet: bool = False) -> List[JSONDictType]:
    """
    Final processing to conform to the schema.

    Converts the human-readable rate strings (e.g. "448b", "1.5KB")
    produced by iftop into integer byte counts, in place.

    Parameters:

        proc_data: (List of Dictionaries) raw structured data to process
        quiet:     (boolean) unused; kept for interface compatibility

    Returns:

        List of Dictionaries. Structured to conform to the schema.
    """
    # fields holding human-readable sizes that must become integers
    string_to_bytes_fields = {"last_2s", "last_10s", "last_40s", "cumulative"}

    # top-level keys whose value is a flat dict of rate fields
    one_nesting = {
        "total_send_rate",
        "total_receive_rate",
        "total_send_and_receive_rate",
        "peak_rate",
        "cumulative_rate",
    }

    for entry in proc_data:
        for entry_key, value in entry.items():
            if entry_key in one_nesting:
                for field in value:
                    if field in string_to_bytes_fields:
                        value[field] = _parse_size(value[field])

            elif entry_key == "clients":
                # rate fields are nested one level deeper under each
                # client's connection records
                for client in value:
                    for connection in client.get("connections", []):
                        for field in connection:
                            if field in string_to_bytes_fields:
                                connection[field] = _parse_size(connection[field])

    return proc_data
|
||||
|
||||
# _parse_size from https://github.com/xolox/python-humanfriendly
|
||||
|
||||
# Copyright (c) 2021 Peter Odding
|
||||
|
||||
# Permission is hereby granted, free of charge, to any person obtaining
|
||||
# a copy of this software and associated documentation files (the
|
||||
# "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the Software, and to
|
||||
# permit persons to whom the Software is furnished to do so, subject to
|
||||
# the following conditions:
|
||||
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
|
||||
# Note: this function can be replaced with jc.utils.convert_size_to_int
|
||||
# in the future.
|
||||
def _parse_size(size, binary=False):
|
||||
"""
|
||||
Parse a human readable data size and return the number of bytes.
|
||||
|
||||
:param size: The human readable file size to parse (a string).
|
||||
:param binary: :data:`True` to use binary multiples of bytes (base-2) for
|
||||
ambiguous unit symbols and names, :data:`False` to use
|
||||
decimal multiples of bytes (base-10).
|
||||
:returns: The corresponding size in bytes (an integer).
|
||||
:raises: :exc:`InvalidSize` when the input can't be parsed.
|
||||
|
||||
This function knows how to parse sizes in bytes, kilobytes, megabytes,
|
||||
gigabytes, terabytes and petabytes. Some examples:
|
||||
|
||||
>>> from humanfriendly import parse_size
|
||||
>>> parse_size('42')
|
||||
42
|
||||
>>> parse_size('13b')
|
||||
13
|
||||
>>> parse_size('5 bytes')
|
||||
5
|
||||
>>> parse_size('1 KB')
|
||||
1000
|
||||
>>> parse_size('1 kilobyte')
|
||||
1000
|
||||
>>> parse_size('1 KiB')
|
||||
1024
|
||||
>>> parse_size('1 KB', binary=True)
|
||||
1024
|
||||
>>> parse_size('1.5 GB')
|
||||
1500000000
|
||||
>>> parse_size('1.5 GB', binary=True)
|
||||
1610612736
|
||||
"""
|
||||
def tokenize(text):
|
||||
tokenized_input = []
|
||||
for token in re.split(r'(\d+(?:\.\d+)?)', text):
|
||||
token = token.strip()
|
||||
if re.match(r'\d+\.\d+', token):
|
||||
tokenized_input.append(float(token))
|
||||
elif token.isdigit():
|
||||
tokenized_input.append(int(token))
|
||||
elif token:
|
||||
tokenized_input.append(token)
|
||||
return tokenized_input
|
||||
|
||||
SizeUnit = namedtuple('SizeUnit', 'divider, symbol, name')
|
||||
CombinedUnit = namedtuple('CombinedUnit', 'decimal, binary')
|
||||
disk_size_units = (
|
||||
CombinedUnit(SizeUnit(1000**1, 'KB', 'kilobyte'), SizeUnit(1024**1, 'KiB', 'kibibyte')),
|
||||
CombinedUnit(SizeUnit(1000**2, 'MB', 'megabyte'), SizeUnit(1024**2, 'MiB', 'mebibyte')),
|
||||
CombinedUnit(SizeUnit(1000**3, 'GB', 'gigabyte'), SizeUnit(1024**3, 'GiB', 'gibibyte')),
|
||||
CombinedUnit(SizeUnit(1000**4, 'TB', 'terabyte'), SizeUnit(1024**4, 'TiB', 'tebibyte')),
|
||||
CombinedUnit(SizeUnit(1000**5, 'PB', 'petabyte'), SizeUnit(1024**5, 'PiB', 'pebibyte')),
|
||||
CombinedUnit(SizeUnit(1000**6, 'EB', 'exabyte'), SizeUnit(1024**6, 'EiB', 'exbibyte')),
|
||||
CombinedUnit(SizeUnit(1000**7, 'ZB', 'zettabyte'), SizeUnit(1024**7, 'ZiB', 'zebibyte')),
|
||||
CombinedUnit(SizeUnit(1000**8, 'YB', 'yottabyte'), SizeUnit(1024**8, 'YiB', 'yobibyte')),
|
||||
)
|
||||
tokens = tokenize(size)
|
||||
if tokens and isinstance(tokens[0], Number):
|
||||
# Get the normalized unit (if any) from the tokenized input.
|
||||
normalized_unit = tokens[1].lower() if len(tokens) == 2 and isinstance(tokens[1], str) else ''
|
||||
# If the input contains only a number, it's assumed to be the number of
|
||||
# bytes. The second token can also explicitly reference the unit bytes.
|
||||
if len(tokens) == 1 or normalized_unit.startswith('b'):
|
||||
return int(tokens[0])
|
||||
# Otherwise we expect two tokens: A number and a unit.
|
||||
if normalized_unit:
|
||||
# Convert plural units to singular units, for details:
|
||||
# https://github.com/xolox/python-humanfriendly/issues/26
|
||||
normalized_unit = normalized_unit.rstrip('s')
|
||||
for unit in disk_size_units:
|
||||
# First we check for unambiguous symbols (KiB, MiB, GiB, etc)
|
||||
# and names (kibibyte, mebibyte, gibibyte, etc) because their
|
||||
# handling is always the same.
|
||||
if normalized_unit in (unit.binary.symbol.lower(), unit.binary.name.lower()):
|
||||
return int(tokens[0] * unit.binary.divider)
|
||||
# Now we will deal with ambiguous prefixes (K, M, G, etc),
|
||||
# symbols (KB, MB, GB, etc) and names (kilobyte, megabyte,
|
||||
# gigabyte, etc) according to the caller's preference.
|
||||
if (normalized_unit in (unit.decimal.symbol.lower(), unit.decimal.name.lower()) or
|
||||
normalized_unit.startswith(unit.decimal.symbol[0].lower())):
|
||||
return int(tokens[0] * (unit.binary.divider if binary else unit.decimal.divider))
|
||||
# We failed to parse the size specification.
|
||||
return None
|
||||
|
||||
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> List[JSONDictType]:
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of Dictionaries. Raw or processed structured data.
    """
    # fix: compatibility() and input_type_check() were previously called
    # twice (here and again after the regex definitions); once is enough
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output: List[Dict] = []
    interface_item: Dict = {}
    current_client: Dict = {}
    clients: List = []
    # True after a "=>" (send) line has been seen and its "<=" (receive)
    # pair is still pending
    is_previous_line_interface = False
    saw_already_host_line = False

    # regex building blocks for the client table rows
    before_arrow = r"\s+(?P<index>\d+)\s+(?P<host_name>[^\s]+):(?P<host_port>[^\s]+)\s+"
    before_arrow_no_port = r"\s+(?P<index>\d+)\s+(?P<host_name>[^\s]+)\s+"
    after_arrow_before_newline = r"\s+(?P<send_last_2s>[^\s]+)\s+(?P<send_last_10s>[^\s]+)\s+(?P<send_last_40s>[^\s]+)\s+(?P<send_cumulative>[^\s]+)"
    newline_before_arrow = r"\s+(?P<receive_ip>.+):(?P<receive_port>\w+)\s+"
    newline_before_arrow_no_port = r"\s+(?P<receive_ip>.+)\s+"
    after_arrow_till_end = r"\s+(?P<receive_last_2s>[^\s]+)\s+(?P<receive_last_10s>[^\s]+)\s+(?P<receive_last_40s>[^\s]+)\s+(?P<receive_cumulative>[^\s]+)"

    re_linux_clients_before_newline = re.compile(
        rf"{before_arrow}=>{after_arrow_before_newline}"
    )
    re_linux_clients_before_newline_no_port = re.compile(
        rf"{before_arrow_no_port}=>{after_arrow_before_newline}"
    )
    re_linux_clients_after_newline_no_port = re.compile(
        rf"{newline_before_arrow_no_port}<={after_arrow_till_end}"
    )
    re_linux_clients_after_newline = re.compile(
        rf"{newline_before_arrow}<={after_arrow_till_end}"
    )

    # summary-section patterns
    re_total_send_rate = re.compile(
        r"Total send rate:\s+(?P<total_send_rate_last_2s>[^\s]+)\s+(?P<total_send_rate_last_10s>[^\s]+)\s+(?P<total_send_rate_last_40s>[^\s]+)"
    )
    re_total_receive_rate = re.compile(
        r"Total receive rate:\s+(?P<total_receive_rate_last_2s>[^\s]+)\s+(?P<total_receive_rate_last_10s>[^\s]+)\s+(?P<total_receive_rate_last_40s>[^\s]+)"
    )
    re_total_send_and_receive_rate = re.compile(
        r"Total send and receive rate:\s+(?P<total_send_and_receive_rate_last_2s>[^\s]+)\s+(?P<total_send_and_receive_rate_last_10s>[^\s]+)\s+(?P<total_send_and_receive_rate_last_40s>[^\s]+)"
    )
    re_peak_rate = re.compile(
        r"Peak rate \(sent/received/total\):\s+(?P<peak_rate_sent>[^\s]+)\s+(?P<peak_rate_received>[^\s]+)\s+(?P<peak_rate_total>[^\s]+)"
    )
    re_cumulative_rate = re.compile(
        r"Cumulative \(sent/received/total\):\s+(?P<cumulative_rate_sent>[^\s]+)\s+(?P<cumulative_rate_received>[^\s]+)\s+(?P<cumulative_rate_total>[^\s]+)"
    )

    if not jc.utils.has_data(data):
        return raw_output if raw else _process(raw_output, quiet=quiet)

    for line in filter(None, data.splitlines()):
        if line.startswith("interface:"):
            # Example:
            # interface: enp0s3
            interface_item["device"] = line.split(":")[1].strip()

        elif line.startswith("IP address is:"):
            # Example:
            # IP address is: 10.10.15.129
            interface_item["ip_address"] = line.split(":")[1].strip()

        elif line.startswith("MAC address is:"):
            # Example:
            # MAC address is: 08:00:27:c0:4a:4f
            # strip off the "MAC address is: " part
            data_without_front_list = line.split(":")[1:]

            # join the remaining parts back together since the MAC
            # address itself contains colons
            data_without_front = ":".join(data_without_front_list)
            interface_item["mac_address"] = data_without_front.strip()

        elif line.startswith("Listening on"):
            # Example:
            # Listening on enp0s3
            pass

        elif (
            line.startswith("# Host name (port/service if enabled)")
            and not saw_already_host_line
        ):
            saw_already_host_line = True
            # first header row of the client table; nothing to capture
            pass

        elif (
            line.startswith("# Host name (port/service if enabled)")
            and saw_already_host_line
        ):
            # a repeated header starts a new snapshot for the same
            # interface: carry over the static interface fields
            old_interface_item, interface_item = interface_item, {}
            interface_item.update(
                {
                    "device": old_interface_item["device"],
                    "ip_address": old_interface_item["ip_address"],
                    "mac_address": old_interface_item["mac_address"],
                }
            )

        elif "=>" in line and is_previous_line_interface and ":" in line:
            # should not happen
            pass

        elif "=>" in line and not is_previous_line_interface and ":" in line:
            # Example:
            #    1 ubuntu-2004-clean-01:ssh  =>  448b  448b  448b  112B
            is_previous_line_interface = True
            match_raw = re_linux_clients_before_newline.match(line)

            if not match_raw:
                # this is a bug in iftop
                continue

            match_dict = match_raw.groupdict()
            current_client = {}
            current_client["index"] = int(match_dict["index"])
            current_client["connections"] = []
            current_client_send = {
                "host_name": match_dict["host_name"],
                "host_port": match_dict["host_port"],
                "last_2s": match_dict["send_last_2s"],
                "last_10s": match_dict["send_last_10s"],
                "last_40s": match_dict["send_last_40s"],
                "cumulative": match_dict["send_cumulative"],
                "direction": "send",
            }
            current_client["connections"].append(current_client_send)
            # not adding yet as the receive part is not yet parsed

        elif "=>" in line and not is_previous_line_interface and ":" not in line:
            # should not happen
            pass

        elif "=>" in line and is_previous_line_interface and ":" not in line:
            # send line without a port/service column
            is_previous_line_interface = True
            match_raw = re_linux_clients_before_newline_no_port.match(line)

            if not match_raw:
                # this is a bug in iftop
                continue

            match_dict = match_raw.groupdict()
            current_client = {}
            current_client["index"] = int(match_dict["index"])
            current_client["connections"] = []
            current_client_send = {
                "host_name": match_dict["host_name"],
                "last_2s": match_dict["send_last_2s"],
                "last_10s": match_dict["send_last_10s"],
                "last_40s": match_dict["send_last_40s"],
                "cumulative": match_dict["send_cumulative"],
                "direction": "send",
            }
            current_client["connections"].append(current_client_send)
            # not adding yet as the receive part is not yet parsed

        elif "<=" in line and not is_previous_line_interface and ":" in line:
            # should not happen
            pass

        elif "<=" in line and is_previous_line_interface and ":" in line:
            # Example:
            #      10.10.15.72:40876  <=  208b  208b  208b  52B
            is_previous_line_interface = False
            match_raw = re_linux_clients_after_newline.match(line)

            if not match_raw:
                # this is a bug in iftop
                continue

            match_dict = match_raw.groupdict()
            current_client_receive = {
                "host_name": match_dict["receive_ip"],
                "host_port": match_dict["receive_port"],
                "last_2s": match_dict["receive_last_2s"],
                "last_10s": match_dict["receive_last_10s"],
                "last_40s": match_dict["receive_last_40s"],
                "cumulative": match_dict["receive_cumulative"],
                "direction": "receive",
            }

            # the send half was parsed on the previous line: the client
            # record is complete now
            current_client["connections"].append(current_client_receive)
            clients.append(current_client)

        elif "<=" in line and not is_previous_line_interface and ":" not in line:
            # should not happen
            pass

        elif "<=" in line and is_previous_line_interface and ":" not in line:
            # receive line without a port/service column
            is_previous_line_interface = False
            match_raw = re_linux_clients_after_newline_no_port.match(line)

            if not match_raw:
                # this is a bug in iftop
                continue

            match_dict = match_raw.groupdict()
            current_client_receive = {
                "host_name": match_dict["receive_ip"],
                "last_2s": match_dict["receive_last_2s"],
                "last_10s": match_dict["receive_last_10s"],
                "last_40s": match_dict["receive_last_40s"],
                "cumulative": match_dict["receive_cumulative"],
                "direction": "receive",
            }

            current_client["connections"].append(current_client_receive)
            clients.append(current_client)

        # separator row between the client table and the summary section
        elif all(c == "-" for c in line):
            pass

        elif line.startswith("Total send rate"):
            # Example:
            # Total send rate:  448b  448b  448b
            match_raw = re_total_send_rate.match(line)

            if not match_raw:
                # this is a bug in iftop
                continue

            match_dict = match_raw.groupdict()
            interface_item["total_send_rate"] = {}
            interface_item["total_send_rate"].update(
                {
                    "last_2s": match_dict["total_send_rate_last_2s"],
                    "last_10s": match_dict["total_send_rate_last_10s"],
                    "last_40s": match_dict["total_send_rate_last_40s"],
                }
            )

        elif line.startswith("Total receive rate"):
            # Example:
            # Total receive rate:  208b  208b  208b
            match_raw = re_total_receive_rate.match(line)

            if not match_raw:
                # this is a bug in iftop
                continue

            match_dict = match_raw.groupdict()
            interface_item["total_receive_rate"] = {}
            interface_item["total_receive_rate"].update(
                {
                    "last_2s": match_dict["total_receive_rate_last_2s"],
                    "last_10s": match_dict["total_receive_rate_last_10s"],
                    "last_40s": match_dict["total_receive_rate_last_40s"],
                }
            )

        elif line.startswith("Total send and receive rate"):
            # Example:
            # Total send and receive rate:  656b  656b  656b
            match_raw = re_total_send_and_receive_rate.match(line)

            if not match_raw:
                # this is a bug in iftop
                continue

            match_dict = match_raw.groupdict()
            interface_item["total_send_and_receive_rate"] = {}
            interface_item["total_send_and_receive_rate"].update(
                {
                    "last_2s": match_dict["total_send_and_receive_rate_last_2s"],
                    "last_10s": match_dict["total_send_and_receive_rate_last_10s"],
                    "last_40s": match_dict["total_send_and_receive_rate_last_40s"],
                }
            )

        elif line.startswith("Peak rate"):
            # NOTE(review): iftop reports sent/received/total here but the
            # schema reuses the last_2s/last_10s/last_40s keys — kept as-is
            # for backward compatibility
            match_raw = re_peak_rate.match(line)

            if not match_raw:
                # this is a bug in iftop
                continue

            match_dict = match_raw.groupdict()
            interface_item["peak_rate"] = {}
            interface_item["peak_rate"].update(
                {
                    "last_2s": match_dict["peak_rate_sent"],
                    "last_10s": match_dict["peak_rate_received"],
                    "last_40s": match_dict["peak_rate_total"],
                }
            )

        elif line.startswith("Cumulative"):
            match_raw = re_cumulative_rate.match(line)

            if not match_raw:
                # this is a bug in iftop
                continue

            match_dict = match_raw.groupdict()
            interface_item["cumulative_rate"] = {}
            interface_item["cumulative_rate"].update(
                {
                    "last_2s": match_dict["cumulative_rate_sent"],
                    "last_10s": match_dict["cumulative_rate_received"],
                    "last_40s": match_dict["cumulative_rate_total"],
                }
            )

        elif all(c == "=" for c in line):
            # end-of-snapshot marker: finalize the interface record
            interface_item["clients"] = clients
            clients = []
            # keep the copy here as without it keeps the objects linked
            raw_output.append(interface_item.copy())

    return raw_output if raw else _process(raw_output, quiet=quiet)
|
||||
@@ -17,12 +17,12 @@ contained in lists/arrays.
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat foo.ini | jc --ini
|
||||
$ cat foo.ini | jc --ini-dup
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('ini', ini_file_output)
|
||||
result = jc.parse('ini_dup', ini_file_output)
|
||||
|
||||
Schema:
|
||||
|
||||
@@ -62,7 +62,7 @@ Examples:
|
||||
fruit = peach
|
||||
color = green
|
||||
|
||||
$ cat example.ini | jc --ini -p
|
||||
$ cat example.ini | jc --ini-dup -p
|
||||
{
|
||||
"foo": [
|
||||
"fiz"
|
||||
@@ -97,7 +97,7 @@ import uuid
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.0'
|
||||
version = '1.1'
|
||||
description = 'INI with duplicate key file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
145
jc/parsers/ip_route.py
Normal file
145
jc/parsers/ip_route.py
Normal file
@@ -0,0 +1,145 @@
|
||||
"""jc - JSON Convert `ip route` command output parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ ip route | jc --ip-route
|
||||
|
||||
or
|
||||
|
||||
$ jc ip-route
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('ip_route', ip_route_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"ip": string,
|
||||
"via": string,
|
||||
"dev": string,
|
||||
"metric": integer,
|
||||
"proto": string,
|
||||
"scope": string,
|
||||
"src": string,
|
||||
"via": string,
|
||||
"status": string
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ ip route | jc --ip-route -p
|
||||
[
|
||||
{
|
||||
"ip": "10.0.2.0/24",
|
||||
"dev": "enp0s3",
|
||||
"proto": "kernel",
|
||||
"scope": "link",
|
||||
"src": "10.0.2.15",
|
||||
"metric": 100
|
||||
}
|
||||
]
|
||||
"""
|
||||
from typing import Dict
|
||||
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info:
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = '`ip route` command parser'
    author = 'Julian Jackson'
    author_email = 'jackson.julian55@yahoo.com'
    # platforms this parser has been verified against
    compatible = ['linux']
    # enables the `jc ip-route ...` magic-command syntax
    magic_commands = ['ip route']
    tags = ['command']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of Json objects if data is processed and Raw data if raw = true.
|
||||
"""
|
||||
structure = {}
|
||||
items = []
|
||||
lines = data.splitlines()
|
||||
index = 0
|
||||
place = 0
|
||||
inc = 0
|
||||
|
||||
for line in lines:
|
||||
temp = line.split()
|
||||
for word in temp:
|
||||
if word == 'via':
|
||||
y = {'via': temp[place + 1]}
|
||||
place += 1
|
||||
structure.update(y)
|
||||
elif word == 'dev':
|
||||
y = {'dev': temp[place + 1]}
|
||||
place += 1
|
||||
structure.update(y)
|
||||
elif word == 'metric':
|
||||
if raw:
|
||||
y = {'metric': temp[place + 1]}
|
||||
else:
|
||||
y = {'metric': jc.utils.convert_to_int(temp[place+1])}
|
||||
place += 1
|
||||
structure.update(y)
|
||||
elif word == 'proto':
|
||||
y = {'proto': temp[place + 1]}
|
||||
place += 1
|
||||
structure.update(y)
|
||||
elif word == 'scope':
|
||||
y = {'scope': temp[place + 1]}
|
||||
place += 1
|
||||
structure.update(y)
|
||||
elif word == 'src':
|
||||
y = {'src': temp[place + 1]}
|
||||
place += 1
|
||||
structure.update(y)
|
||||
elif word == 'status':
|
||||
y = {'status': temp[place + 1]}
|
||||
place += 1
|
||||
structure.update(y)
|
||||
elif word == 'default':
|
||||
y = {'ip': 'default'}
|
||||
place += 1
|
||||
structure.update(y)
|
||||
elif word == 'linkdown':
|
||||
y = {'status': 'linkdown'}
|
||||
place += 1
|
||||
structure.update(y)
|
||||
else:
|
||||
y = {'ip': temp[0]}
|
||||
place += 1
|
||||
structure.update(y)
|
||||
if y.get("ip") != "":
|
||||
items.append(structure)
|
||||
structure = {}
|
||||
place = 0
|
||||
index += 1
|
||||
inc += 1
|
||||
|
||||
jc.utils.compatibility(__name__, info.compatible, quiet)
|
||||
jc.utils.input_type_check(data)
|
||||
|
||||
if not jc.utils.has_data(data):
|
||||
return []
|
||||
|
||||
return items
|
||||
@@ -25,7 +25,7 @@ Schema:
|
||||
"num" integer,
|
||||
"pkts": integer,
|
||||
"bytes": integer, # converted based on suffix
|
||||
"target": string,
|
||||
"target": string, # Null if blank
|
||||
"prot": string,
|
||||
"opt": string, # "--" = Null
|
||||
"in": string,
|
||||
@@ -163,7 +163,7 @@ import jc.utils
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.8'
|
||||
version = '1.9'
|
||||
description = '`iptables` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -222,6 +222,10 @@ def _process(proc_data):
|
||||
if rule['opt'] == '--':
|
||||
rule['opt'] = None
|
||||
|
||||
if 'target' in rule:
|
||||
if rule['target'] == '':
|
||||
rule['target'] = None
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
@@ -271,15 +275,18 @@ def parse(data, raw=False, quiet=False):
|
||||
continue
|
||||
|
||||
else:
|
||||
# sometimes the "target" column is blank. Stuff in a dummy character
|
||||
if headers[0] == 'target' and line.startswith(' '):
|
||||
line = '\u2063' + line
|
||||
|
||||
rule = line.split(maxsplit=len(headers) - 1)
|
||||
temp_rule = dict(zip(headers, rule))
|
||||
if temp_rule:
|
||||
if temp_rule.get('target') == '\u2063':
|
||||
temp_rule['target'] = ''
|
||||
chain['rules'].append(temp_rule)
|
||||
|
||||
if chain:
|
||||
raw_output.append(chain)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return _process(raw_output)
|
||||
return raw_output if raw else _process(raw_output)
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
"""jc - JSON Convert ISO 8601 Datetime string parser
|
||||
|
||||
This parser has been renamed to datetime-iso (cli) or datetime_iso (module).
|
||||
|
||||
This parser will be removed in a future version, so please start using
|
||||
the new parser name.
|
||||
"""
|
||||
from jc.parsers import datetime_iso
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.1'
|
||||
description = 'Deprecated - please use datetime-iso'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
details = 'Deprecated - please use datetime-iso'
|
||||
compatible = ['linux', 'aix', 'freebsd', 'darwin', 'win32', 'cygwin']
|
||||
tags = ['standard', 'string']
|
||||
deprecated = True
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
"""
|
||||
This parser is deprecated and calls datetime_iso. Please use datetime_iso
|
||||
directly. This parser will be removed in the future.
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
"""
|
||||
jc.utils.warning_message([
|
||||
'iso-datetime parser is deprecated. Please use datetime-iso instead.'
|
||||
])
|
||||
|
||||
return datetime_iso.parse(data, raw=raw, quiet=quiet)
|
||||
89
jc/parsers/lsb_release.py
Normal file
89
jc/parsers/lsb_release.py
Normal file
@@ -0,0 +1,89 @@
|
||||
"""jc - JSON Convert `lsb_release` command parser
|
||||
|
||||
This parser is an alias to the Key/Value parser (`--kv`).
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ lsb_release -a | jc --lsb-release
|
||||
|
||||
or
|
||||
$ jc lsb_release -a
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('lsb_release', lsb_release_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
{
|
||||
"<key>": string
|
||||
}
|
||||
|
||||
Examples:
|
||||
|
||||
$ lsb_release -a | jc --lsb-release -p
|
||||
{
|
||||
"Distributor ID": "Ubuntu",
|
||||
"Description": "Ubuntu 16.04.6 LTS",
|
||||
"Release": "16.04",
|
||||
"Codename": "xenial"
|
||||
}
|
||||
"""
|
||||
from jc.jc_types import JSONDictType
|
||||
import jc.parsers.kv
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.0'
|
||||
description = '`lsb_release` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
details = 'Using the Key/Value parser'
|
||||
compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
|
||||
magic_commands = ['lsb_release']
|
||||
tags = ['command']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def _process(proc_data: JSONDictType) -> JSONDictType:
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (Dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured to conform to the schema.
|
||||
"""
|
||||
return jc.parsers.kv._process(proc_data)
|
||||
|
||||
|
||||
def parse(
|
||||
data: str,
|
||||
raw: bool = False,
|
||||
quiet: bool = False
|
||||
) -> JSONDictType:
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
"""
|
||||
jc.utils.compatibility(__name__, info.compatible, quiet)
|
||||
raw_output = jc.parsers.kv.parse(data, raw, quiet)
|
||||
|
||||
return raw_output if raw else _process(raw_output)
|
||||
@@ -70,12 +70,14 @@ Example:
|
||||
...
|
||||
]
|
||||
"""
|
||||
import re
|
||||
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.8'
|
||||
version = '1.9'
|
||||
description = '`mount` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -133,14 +135,26 @@ def _linux_parse(data):
|
||||
|
||||
for entry in data:
|
||||
output_line = {}
|
||||
parsed_line = entry.split()
|
||||
|
||||
output_line['filesystem'] = parsed_line[0]
|
||||
output_line['mount_point'] = parsed_line[2]
|
||||
output_line['type'] = parsed_line[4]
|
||||
output_line['options'] = parsed_line[5].lstrip('(').rstrip(')').split(',')
|
||||
pattern = re.compile(
|
||||
r'''
|
||||
(?P<filesystem>\S+)\s+
|
||||
on\s+
|
||||
(?P<mount_point>.*?)\s+
|
||||
type\s+
|
||||
(?P<type>\S+)\s+
|
||||
\((?P<options>.*?)\)\s*''',
|
||||
re.VERBOSE)
|
||||
|
||||
output.append(output_line)
|
||||
match = pattern.match(entry)
|
||||
groups = match.groupdict()
|
||||
|
||||
if groups:
|
||||
output_line['filesystem'] = groups["filesystem"]
|
||||
output_line['mount_point'] = groups["mount_point"]
|
||||
output_line['type'] = groups["type"]
|
||||
output_line['options'] = groups["options"].split(',')
|
||||
output.append(output_line)
|
||||
|
||||
return output
|
||||
|
||||
@@ -160,7 +174,7 @@ def _aix_parse(data):
|
||||
|
||||
# AIX mount entries have the remote node as the zeroth element. If the
|
||||
# mount is local, the zeroth element is the filesystem instead. We can
|
||||
# detect this by the lenth of the list. For local mounts, length is 7,
|
||||
# detect this by the length of the list. For local mounts, length is 7,
|
||||
# and for remote mounts, the length is 8. In the remote case, pop off
|
||||
# the zeroth element. Then parsed_line has a consistent format.
|
||||
if len(parsed_line) == 8:
|
||||
|
||||
@@ -355,7 +355,7 @@ import jc.utils
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.14'
|
||||
version = '1.15'
|
||||
description = '`netstat` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
|
||||
@@ -31,10 +31,19 @@ def normalize_interface_headers(header):
|
||||
|
||||
|
||||
def parse_network(headers, entry):
|
||||
LIST_OF_STATES = [
|
||||
"ESTABLISHED", "SYN_SENT", "SYN_RECV", "FIN_WAIT1", "FIN_WAIT2",
|
||||
"TIME_WAIT", "CLOSED", "CLOSE_WAIT", "LAST_ACK", "LISTEN", "CLOSING",
|
||||
"UNKNOWN", "7"
|
||||
]
|
||||
|
||||
# split entry based on presence of value in "State" column
|
||||
contains_state = any(state in entry for state in LIST_OF_STATES)
|
||||
split_modifier = 1 if contains_state else 2
|
||||
entry = entry.split(maxsplit=len(headers) - split_modifier)
|
||||
|
||||
# Count words in header
|
||||
# if len of line is one less than len of header, then insert None in field 5
|
||||
entry = entry.split(maxsplit=len(headers) - 1)
|
||||
|
||||
if len(entry) == len(headers) - 1:
|
||||
entry.insert(5, None)
|
||||
|
||||
|
||||
236
jc/parsers/nsd_control.py
Normal file
236
jc/parsers/nsd_control.py
Normal file
@@ -0,0 +1,236 @@
|
||||
"""jc - JSON Convert `nsd-control` command output parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ nsd-control | jc --nsd-control
|
||||
|
||||
or
|
||||
|
||||
$ jc nsd-control
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('nsd_control', nsd_control_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"version": string,
|
||||
"verbosity": integer,
|
||||
"ratelimit": integer
|
||||
}
|
||||
]
|
||||
|
||||
[
|
||||
{
|
||||
"zone": string
|
||||
"status": {
|
||||
"state": string,
|
||||
"served-serial": string,
|
||||
"commit-serial": string,
|
||||
"wait": string
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ nsd-control | jc --nsd-control status
|
||||
[
|
||||
{
|
||||
"version": "4.6.2",
|
||||
"verbosity": "2",
|
||||
"ratelimit": "0"
|
||||
}
|
||||
]
|
||||
|
||||
$ nsd-control | jc --nsd-control zonestatus sunet.se
|
||||
[
|
||||
{
|
||||
"zone": "sunet.se",
|
||||
"status": {
|
||||
"state": "ok",
|
||||
"served-serial": "2023090704 since 2023-09-07T16:34:27",
|
||||
"commit-serial": "2023090704 since 2023-09-07T16:34:27",
|
||||
"wait": "28684 sec between attempts"
|
||||
}
|
||||
}
|
||||
]
|
||||
|
||||
"""
|
||||
from typing import List, Dict
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.0'
|
||||
description = '`nsd-control` command parser'
|
||||
author = 'Pettai'
|
||||
author_email = 'pettai@sunet.se'
|
||||
compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
|
||||
tags = ['command']
|
||||
magic_commands = ['nsd-control']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def _process(proc_data):
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (List of Dictionaries) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Structured to conform to the schema.
|
||||
"""
|
||||
int_list = {'verbosity', 'ratelimit', 'wait'}
|
||||
|
||||
for entry in proc_data:
|
||||
for key in entry:
|
||||
if key in int_list:
|
||||
entry[key] = jc.utils.convert_to_int(entry[key])
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False):
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Raw or processed structured data.
|
||||
"""
|
||||
jc.utils.compatibility(__name__, info.compatible, quiet)
|
||||
jc.utils.input_type_check(data)
|
||||
|
||||
raw_output: List[Dict] = []
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
itrparse = False
|
||||
itr: Dict = {}
|
||||
|
||||
for line in filter(None, data.splitlines()):
|
||||
line = line.strip()
|
||||
|
||||
# default 'ok'
|
||||
if line.startswith('ok'):
|
||||
raw_output.append({'command': 'ok'})
|
||||
continue
|
||||
|
||||
# status
|
||||
if line.startswith('version:'):
|
||||
status = {}
|
||||
linedata = line.split(':', maxsplit=1)
|
||||
version = linedata[1].strip()
|
||||
status.update({'version': version})
|
||||
continue
|
||||
|
||||
if line.startswith('verbosity:'):
|
||||
linedata = line.split(':', maxsplit=1)
|
||||
verbosity = linedata[1]
|
||||
status.update({'verbosity': verbosity})
|
||||
continue
|
||||
|
||||
if line.startswith('ratelimit:'):
|
||||
linedata = line.split(':', maxsplit=1)
|
||||
ratelimit = linedata[1]
|
||||
status.update({'ratelimit': ratelimit})
|
||||
raw_output.append(status)
|
||||
continue
|
||||
|
||||
# print_cookie_secrets
|
||||
if line.startswith('active'):
|
||||
itrparse = True
|
||||
itr = {}
|
||||
linedata = line.split(':', maxsplit=1)
|
||||
active = linedata[1].strip()
|
||||
itr.update({'active': active})
|
||||
continue
|
||||
|
||||
if line.startswith('staging'):
|
||||
linedata = line.split(':', maxsplit=1)
|
||||
staging = linedata[1].strip()
|
||||
itr.update({'staging': staging})
|
||||
continue
|
||||
|
||||
# print_tsig
|
||||
if line.startswith('key:'):
|
||||
tsigs = {}
|
||||
tsigdata = dict()
|
||||
linedata = line.split(' ', maxsplit=6)
|
||||
name = linedata[2].strip('"').rstrip('"')
|
||||
tsigdata.update({'name': name})
|
||||
secret = linedata[4].strip('"').rstrip('"')
|
||||
tsigdata.update({'secret': secret})
|
||||
algorithm = linedata[6].strip('"').rstrip('"')
|
||||
tsigdata.update({'algorithm': algorithm})
|
||||
tsigs.update({'key': tsigdata})
|
||||
raw_output.append(tsigs)
|
||||
continue
|
||||
|
||||
# zonestatus
|
||||
if line.startswith('zone:'):
|
||||
zonename: Dict = dict()
|
||||
zstatus: Dict = dict()
|
||||
linedata = line.split(':\t', maxsplit=1)
|
||||
zone = linedata[1]
|
||||
zonename.update({'zone': zone})
|
||||
continue
|
||||
|
||||
if line.startswith('state:'):
|
||||
linedata = line.split(': ', maxsplit=1)
|
||||
state = linedata[1]
|
||||
zstatus.update({'state': state})
|
||||
continue
|
||||
|
||||
if line.startswith('served-serial:'):
|
||||
linedata = line.split(': ', maxsplit=1)
|
||||
served = linedata[1].strip('"').rstrip('"')
|
||||
zstatus.update({'served-serial': served})
|
||||
continue
|
||||
|
||||
if line.startswith('commit-serial:'):
|
||||
linedata = line.split(': ', maxsplit=1)
|
||||
commit = linedata[1].strip('"').rstrip('"')
|
||||
zstatus.update({'commit-serial': commit})
|
||||
continue
|
||||
|
||||
if line.startswith('wait:'):
|
||||
linedata = line.split(': ', maxsplit=1)
|
||||
wait = linedata[1].strip('"').rstrip('"')
|
||||
zstatus.update({'wait': wait})
|
||||
zonename.update({'status': zstatus})
|
||||
raw_output.append(zonename)
|
||||
continue
|
||||
|
||||
# stats
|
||||
if line.startswith('server') or line.startswith('num.') or line.startswith('size.') or line.startswith('time.') or line.startswith('zone.'):
|
||||
itrparse = True
|
||||
linedata = line.split('=', maxsplit=1)
|
||||
key = linedata[0]
|
||||
if key.startswith('time.'):
|
||||
value = float(linedata[1])
|
||||
else:
|
||||
value = int(linedata[1])
|
||||
itr.update({key: value})
|
||||
continue
|
||||
|
||||
if itrparse:
|
||||
raw_output.append(itr)
|
||||
|
||||
return raw_output if raw else _process(raw_output)
|
||||
113
jc/parsers/os_release.py
Normal file
113
jc/parsers/os_release.py
Normal file
@@ -0,0 +1,113 @@
|
||||
"""jc - JSON Convert `/etc/os-release` file parser
|
||||
|
||||
This parser is an alias to the Key/Value parser (`--kv`).
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat /etc/os-release | jc --os-release
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('os_release', os_release_output)
|
||||
|
||||
Schema:
|
||||
|
||||
{
|
||||
"<key>": string
|
||||
}
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /etc/os-release | jc --os-release -p
|
||||
{
|
||||
"NAME": "CentOS Linux",
|
||||
"VERSION": "7 (Core)",
|
||||
"ID": "centos",
|
||||
"ID_LIKE": "rhel fedora",
|
||||
"VERSION_ID": "7",
|
||||
"PRETTY_NAME": "CentOS Linux 7 (Core)",
|
||||
"ANSI_COLOR": "0;31",
|
||||
"CPE_NAME": "cpe:/o:centos:centos:7",
|
||||
"HOME_URL": "https://www.centos.org/",
|
||||
"BUG_REPORT_URL": "https://bugs.centos.org/",
|
||||
"CENTOS_MANTISBT_PROJECT": "CentOS-7",
|
||||
"CENTOS_MANTISBT_PROJECT_VERSION": "7",
|
||||
"REDHAT_SUPPORT_PRODUCT": "centos",
|
||||
"REDHAT_SUPPORT_PRODUCT_VERSION": "7"
|
||||
}
|
||||
|
||||
$ cat /etc/os-release | jc --os-release -p -r
|
||||
{
|
||||
"NAME": "\\"CentOS Linux\\"",
|
||||
"VERSION": "\\"7 (Core)\\"",
|
||||
"ID": "\\"centos\\"",
|
||||
"ID_LIKE": "\\"rhel fedora\\"",
|
||||
"VERSION_ID": "\\"7\\"",
|
||||
"PRETTY_NAME": "\\"CentOS Linux 7 (Core)\\"",
|
||||
"ANSI_COLOR": "\\"0;31\\"",
|
||||
"CPE_NAME": "\\"cpe:/o:centos:centos:7\\"",
|
||||
"HOME_URL": "\\"https://www.centos.org/\\"",
|
||||
"BUG_REPORT_URL": "\\"https://bugs.centos.org/\\"",
|
||||
"CENTOS_MANTISBT_PROJECT": "\\"CentOS-7\\"",
|
||||
"CENTOS_MANTISBT_PROJECT_VERSION": "\\"7\\"",
|
||||
"REDHAT_SUPPORT_PRODUCT": "\\"centos\\"",
|
||||
"REDHAT_SUPPORT_PRODUCT_VERSION": "\\"7\\""
|
||||
}
|
||||
"""
|
||||
from jc.jc_types import JSONDictType
|
||||
import jc.parsers.kv
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.0'
|
||||
description = '`/etc/os-release` file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
details = 'Using the Key/Value parser'
|
||||
compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
|
||||
tags = ['file', 'standard', 'string']
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
|
||||
def _process(proc_data: JSONDictType) -> JSONDictType:
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (Dictionary) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Structured to conform to the schema.
|
||||
"""
|
||||
return jc.parsers.kv._process(proc_data)
|
||||
|
||||
|
||||
def parse(
|
||||
data: str,
|
||||
raw: bool = False,
|
||||
quiet: bool = False
|
||||
) -> JSONDictType:
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
Parameters:
|
||||
|
||||
data: (string) text data to parse
|
||||
raw: (boolean) unprocessed output if True
|
||||
quiet: (boolean) suppress warning messages if True
|
||||
|
||||
Returns:
|
||||
|
||||
Dictionary. Raw or processed structured data.
|
||||
"""
|
||||
jc.utils.compatibility(__name__, info.compatible, quiet)
|
||||
raw_output = jc.parsers.kv.parse(data, raw, quiet)
|
||||
|
||||
return raw_output if raw else _process(raw_output)
|
||||
@@ -28,12 +28,12 @@
|
||||
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
|
||||
# OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
|
||||
import sys
|
||||
import string
|
||||
|
||||
if sys.version_info >= (3, 0):
|
||||
def unichr(character): # pylint: disable=redefined-builtin
|
||||
return chr(character)
|
||||
|
||||
def unichr(character): # pylint: disable=redefined-builtin
|
||||
return chr(character)
|
||||
|
||||
|
||||
def ConvertNEXTSTEPToUnicode(hex_digits):
|
||||
# taken from http://ftp.unicode.org/Public/MAPPINGS/VENDORS/NEXT/NEXTSTEP.TXT
|
||||
|
||||
@@ -64,12 +64,10 @@ def GetFileEncoding(path):
|
||||
def OpenFileWithEncoding(file_path, encoding):
|
||||
return codecs.open(file_path, 'r', encoding=encoding, errors='ignore')
|
||||
|
||||
if sys.version_info < (3, 0):
|
||||
def OpenFile(file_path):
|
||||
return open(file_path, 'rb')
|
||||
else:
|
||||
def OpenFile(file_path):
|
||||
return open(file_path, 'br')
|
||||
|
||||
def OpenFile(file_path):
|
||||
return open(file_path, 'rb')
|
||||
|
||||
|
||||
class PBParser(object):
|
||||
|
||||
|
||||
@@ -32,7 +32,7 @@ import sys
|
||||
from functools import cmp_to_key
|
||||
|
||||
# for python 3.10+ compatibility
|
||||
if sys.version_info.major == 3 and sys.version_info.minor >= 10:
|
||||
if sys.version_info >= (3, 10):
|
||||
import collections
|
||||
setattr(collections, "MutableMapping", collections.abc.MutableMapping)
|
||||
|
||||
|
||||
@@ -40,6 +40,9 @@ Schema:
|
||||
"kb_ccwr_s": float,
|
||||
"cswch_s": float,
|
||||
"nvcswch_s": float,
|
||||
"usr_ms": integer,
|
||||
"system_ms": integer,
|
||||
"guest_ms": integer,
|
||||
"command": string
|
||||
}
|
||||
]
|
||||
@@ -128,7 +131,7 @@ from jc.exceptions import ParseError
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.1'
|
||||
version = '1.3'
|
||||
description = '`pidstat -H` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -152,11 +155,16 @@ def _process(proc_data: List[Dict]) -> List[Dict]:
|
||||
|
||||
List of Dictionaries. Structured to conform to the schema.
|
||||
"""
|
||||
int_list = {'time', 'uid', 'pid', 'cpu', 'vsz', 'rss', 'stksize', 'stkref'}
|
||||
int_list = {
|
||||
'time', 'uid', 'pid', 'cpu', 'vsz', 'rss', 'stksize', 'stkref',
|
||||
'usr_ms', 'system_ms', 'guest_ms'
|
||||
}
|
||||
|
||||
float_list = {'percent_usr', 'percent_system', 'percent_guest', 'percent_cpu',
|
||||
'minflt_s', 'majflt_s', 'percent_mem', 'kb_rd_s', 'kb_wr_s',
|
||||
'kb_ccwr_s', 'cswch_s', 'nvcswch_s'}
|
||||
float_list = {
|
||||
'percent_usr', 'percent_system', 'percent_guest', 'percent_cpu',
|
||||
'minflt_s', 'majflt_s', 'percent_mem', 'kb_rd_s', 'kb_wr_s',
|
||||
'kb_ccwr_s', 'cswch_s', 'nvcswch_s', 'percent_wait'
|
||||
}
|
||||
|
||||
for entry in proc_data:
|
||||
for key in entry:
|
||||
@@ -169,6 +177,14 @@ def _process(proc_data: List[Dict]) -> List[Dict]:
|
||||
return proc_data
|
||||
|
||||
|
||||
def normalize_header(header: str) -> str:
|
||||
return header.replace('#', ' ')\
|
||||
.replace('-', '_')\
|
||||
.replace('/', '_')\
|
||||
.replace('%', 'percent_')\
|
||||
.lower()
|
||||
|
||||
|
||||
def parse(
|
||||
data: str,
|
||||
raw: bool = False,
|
||||
@@ -191,29 +207,28 @@ def parse(
|
||||
jc.utils.input_type_check(data)
|
||||
|
||||
raw_output: List = []
|
||||
table_list: List = []
|
||||
header_found = False
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
|
||||
# check for line starting with # as the start of the table
|
||||
data_list = list(filter(None, data.splitlines()))
|
||||
for line in data_list.copy():
|
||||
if line.startswith('#'):
|
||||
break
|
||||
else:
|
||||
data_list.pop(0)
|
||||
|
||||
if not data_list:
|
||||
for line in data_list:
|
||||
if line.startswith('#'):
|
||||
header_found = True
|
||||
if len(table_list) > 1:
|
||||
raw_output.extend(simple_table_parse(table_list))
|
||||
table_list = [normalize_header(line)]
|
||||
continue
|
||||
|
||||
if header_found:
|
||||
table_list.append(line)
|
||||
|
||||
if len(table_list) > 1:
|
||||
raw_output.extend(simple_table_parse(table_list))
|
||||
|
||||
if not header_found:
|
||||
raise ParseError('Could not parse pidstat output. Make sure to use "pidstat -h".')
|
||||
|
||||
# normalize header
|
||||
data_list[0] = data_list[0].replace('#', ' ')\
|
||||
.replace('/', '_')\
|
||||
.replace('%', 'percent_')\
|
||||
.lower()
|
||||
|
||||
# remove remaining header lines (e.g. pidstat -H 2 5)
|
||||
data_list = [i for i in data_list if not i.startswith('#')]
|
||||
|
||||
raw_output = simple_table_parse(data_list)
|
||||
|
||||
return raw_output if raw else _process(raw_output)
|
||||
|
||||
@@ -34,6 +34,7 @@ Schema:
|
||||
"percent_usr": float,
|
||||
"percent_system": float,
|
||||
"percent_guest": float,
|
||||
"percent_wait": float,
|
||||
"percent_cpu": float,
|
||||
"cpu": integer,
|
||||
"minflt_s": float,
|
||||
@@ -48,6 +49,9 @@ Schema:
|
||||
"kb_ccwr_s": float,
|
||||
"cswch_s": float,
|
||||
"nvcswch_s": float,
|
||||
"usr_ms": integer,
|
||||
"system_ms": integer,
|
||||
"guest_ms": integer,
|
||||
"command": string,
|
||||
|
||||
# below object only exists if using -qq or ignore_exceptions=True
|
||||
@@ -72,7 +76,7 @@ Examples:
|
||||
{"time":"1646859134","uid":"0","pid":"9","percent_usr":"0.00","perc...}
|
||||
...
|
||||
"""
|
||||
from typing import Dict, Iterable, Union
|
||||
from typing import List, Dict, Iterable, Union
|
||||
import jc.utils
|
||||
from jc.streaming import (
|
||||
add_jc_meta, streaming_input_type_check, streaming_line_input_type_check, raise_or_yield
|
||||
@@ -83,7 +87,7 @@ from jc.exceptions import ParseError
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.1'
|
||||
version = '1.2'
|
||||
description = '`pidstat -H` command streaming parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -107,11 +111,16 @@ def _process(proc_data: Dict) -> Dict:
|
||||
|
||||
Dictionary. Structured data to conform to the schema.
|
||||
"""
|
||||
int_list = {'time', 'uid', 'pid', 'cpu', 'vsz', 'rss', 'stksize', 'stkref'}
|
||||
int_list = {
|
||||
'time', 'uid', 'pid', 'cpu', 'vsz', 'rss', 'stksize', 'stkref',
|
||||
'usr_ms', 'system_ms', 'guest_ms'
|
||||
}
|
||||
|
||||
float_list = {'percent_usr', 'percent_system', 'percent_guest', 'percent_cpu',
|
||||
'minflt_s', 'majflt_s', 'percent_mem', 'kb_rd_s', 'kb_wr_s',
|
||||
'kb_ccwr_s', 'cswch_s', 'nvcswch_s'}
|
||||
float_list = {
|
||||
'percent_usr', 'percent_system', 'percent_guest', 'percent_wait',
|
||||
'percent_cpu', 'minflt_s', 'majflt_s', 'percent_mem', 'kb_rd_s',
|
||||
'kb_wr_s', 'kb_ccwr_s', 'cswch_s', 'nvcswch_s'
|
||||
}
|
||||
|
||||
for key in proc_data:
|
||||
if key in int_list:
|
||||
@@ -123,6 +132,14 @@ def _process(proc_data: Dict) -> Dict:
|
||||
return proc_data
|
||||
|
||||
|
||||
def normalize_header(header: str) -> str:
|
||||
return header.replace('#', ' ')\
|
||||
.replace('-', '_')\
|
||||
.replace('/', '_')\
|
||||
.replace('%', 'percent_')\
|
||||
.lower()
|
||||
|
||||
|
||||
@add_jc_meta
|
||||
def parse(
|
||||
data: Iterable[str],
|
||||
@@ -149,8 +166,8 @@ def parse(
|
||||
jc.utils.compatibility(__name__, info.compatible, quiet)
|
||||
streaming_input_type_check(data)
|
||||
|
||||
found_first_hash = False
|
||||
header = ''
|
||||
table_list: List = []
|
||||
header: str = ''
|
||||
|
||||
for line in data:
|
||||
try:
|
||||
@@ -161,29 +178,30 @@ def parse(
|
||||
# skip blank lines
|
||||
continue
|
||||
|
||||
if not line.startswith('#') and not found_first_hash:
|
||||
# skip preamble lines before header row
|
||||
if line.startswith('#'):
|
||||
if len(table_list) > 1:
|
||||
output_line = simple_table_parse(table_list)[0]
|
||||
yield output_line if raw else _process(output_line)
|
||||
header = ''
|
||||
|
||||
header = normalize_header(line)
|
||||
table_list = [header]
|
||||
continue
|
||||
|
||||
if line.startswith('#') and not found_first_hash:
|
||||
# normalize header
|
||||
header = line.replace('#', ' ')\
|
||||
.replace('/', '_')\
|
||||
.replace('%', 'percent_')\
|
||||
.lower()
|
||||
found_first_hash = True
|
||||
continue
|
||||
|
||||
if line.startswith('#') and found_first_hash:
|
||||
# skip header lines after first one is found
|
||||
continue
|
||||
|
||||
output_line = simple_table_parse([header, line])[0]
|
||||
|
||||
if output_line:
|
||||
if header:
|
||||
table_list.append(line)
|
||||
output_line = simple_table_parse(table_list)[0]
|
||||
yield output_line if raw else _process(output_line)
|
||||
else:
|
||||
raise ParseError('Not pidstat data')
|
||||
table_list = [header]
|
||||
continue
|
||||
|
||||
except Exception as e:
|
||||
yield raise_or_yield(ignore_exceptions, e, line)
|
||||
|
||||
try:
|
||||
if len(table_list) > 1:
|
||||
output_line = simple_table_parse(table_list)[0]
|
||||
yield output_line if raw else _process(output_line)
|
||||
|
||||
except Exception as e:
|
||||
yield raise_or_yield(ignore_exceptions, e, str(table_list))
|
||||
|
||||
@@ -30,6 +30,8 @@ Schema:
|
||||
"packets_received": integer,
|
||||
"packet_loss_percent": float,
|
||||
"duplicates": integer,
|
||||
"errors": integer,
|
||||
"corrupted": integer,
|
||||
"round_trip_ms_min": float,
|
||||
"round_trip_ms_avg": float,
|
||||
"round_trip_ms_max": float,
|
||||
@@ -157,6 +159,7 @@ Examples:
|
||||
]
|
||||
}
|
||||
"""
|
||||
import re
|
||||
import string
|
||||
import ipaddress
|
||||
import jc.utils
|
||||
@@ -164,7 +167,7 @@ import jc.utils
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.9'
|
||||
version = '1.10'
|
||||
description = '`ping` and `ping6` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -190,7 +193,7 @@ def _process(proc_data):
|
||||
"""
|
||||
int_list = {
|
||||
'data_bytes', 'packets_transmitted', 'packets_received', 'bytes', 'icmp_seq', 'ttl',
|
||||
'duplicates', 'vr', 'hl', 'tos', 'len', 'id', 'flg', 'off', 'pro', 'cks'
|
||||
'duplicates', 'corrupted', 'errors', 'vr', 'hl', 'tos', 'len', 'id', 'flg', 'off', 'pro', 'cks'
|
||||
}
|
||||
|
||||
float_list = {
|
||||
@@ -321,41 +324,47 @@ def _linux_parse(data):
|
||||
continue
|
||||
|
||||
if footer:
|
||||
if 'packets transmitted' in line:
|
||||
if ' duplicates,' in line:
|
||||
raw_output.update(
|
||||
{
|
||||
'packets_transmitted': line.split()[0],
|
||||
'packets_received': line.split()[3],
|
||||
'packet_loss_percent': line.split()[7].rstrip('%'),
|
||||
'duplicates': line.split()[5].lstrip('+'),
|
||||
'time_ms': line.split()[11].replace('ms', '')
|
||||
}
|
||||
)
|
||||
continue
|
||||
else:
|
||||
raw_output.update(
|
||||
{
|
||||
'packets_transmitted': line.split()[0],
|
||||
'packets_received': line.split()[3],
|
||||
'packet_loss_percent': line.split()[5].rstrip('%'),
|
||||
'duplicates': '0',
|
||||
'time_ms': line.split()[9].replace('ms', '')
|
||||
}
|
||||
)
|
||||
continue
|
||||
# Init in zero, to keep compatibility with previous behaviour
|
||||
if 'duplicates' not in raw_output:
|
||||
raw_output['duplicates'] = '0'
|
||||
|
||||
else:
|
||||
split_line = line.split(' = ')[1]
|
||||
split_line = split_line.split('/')
|
||||
raw_output.update(
|
||||
{
|
||||
'round_trip_ms_min': split_line[0],
|
||||
'round_trip_ms_avg': split_line[1],
|
||||
'round_trip_ms_max': split_line[2],
|
||||
'round_trip_ms_stddev': split_line[3].split()[0]
|
||||
}
|
||||
)
|
||||
#
|
||||
# See: https://github.com/dgibson/iputils/blob/master/ping_common.c#L995
|
||||
#
|
||||
m = re.search(r'(\d+) packets transmitted', line)
|
||||
if m:
|
||||
raw_output['packets_transmitted'] = m.group(1)
|
||||
|
||||
m = re.search(r'(\d+) received,', line)
|
||||
if m:
|
||||
raw_output['packets_received'] = m.group(1)
|
||||
|
||||
m = re.search(r'[+](\d+) duplicates', line)
|
||||
if m:
|
||||
raw_output['duplicates'] = m.group(1)
|
||||
|
||||
m = re.search(r'[+](\d+) corrupted', line)
|
||||
if m:
|
||||
raw_output['corrupted'] = m.group(1)
|
||||
|
||||
m = re.search(r'[+](\d+) errors', line)
|
||||
if m:
|
||||
raw_output['errors'] = m.group(1)
|
||||
|
||||
m = re.search(r'([\d\.]+)% packet loss', line)
|
||||
if m:
|
||||
raw_output['packet_loss_percent'] = m.group(1)
|
||||
|
||||
m = re.search(r'time (\d+)ms', line)
|
||||
if m:
|
||||
raw_output['time_ms'] = m.group(1)
|
||||
|
||||
m = re.search(r'rtt min\/avg\/max\/mdev += +([\d\.]+)\/([\d\.]+)\/([\d\.]+)\/([\d\.]+) ms', line)
|
||||
if m:
|
||||
raw_output['round_trip_ms_min'] = m.group(1)
|
||||
raw_output['round_trip_ms_avg'] = m.group(2)
|
||||
raw_output['round_trip_ms_max'] = m.group(3)
|
||||
raw_output['round_trip_ms_stddev'] = m.group(4)
|
||||
|
||||
# ping response lines
|
||||
else:
|
||||
|
||||
@@ -31,7 +31,7 @@ Schema:
|
||||
"source_ip": string,
|
||||
"destination_ip": string,
|
||||
"sent_bytes": integer,
|
||||
"pattern": string, # (null if not set)
|
||||
"pattern": string, # null if not set
|
||||
"destination": string,
|
||||
"timestamp": float,
|
||||
"response_bytes": integer,
|
||||
@@ -44,10 +44,12 @@ Schema:
|
||||
"packets_received": integer,
|
||||
"packet_loss_percent": float,
|
||||
"duplicates": integer,
|
||||
"round_trip_ms_min": float,
|
||||
"round_trip_ms_avg": float,
|
||||
"round_trip_ms_max": float,
|
||||
"round_trip_ms_stddev": float,
|
||||
"errors": integer, # null if not set
|
||||
"corrupted": integer, # null if not set
|
||||
"round_trip_ms_min": float, # null if not set
|
||||
"round_trip_ms_avg": float, # null if not set
|
||||
"round_trip_ms_max": float, # null if not set
|
||||
"round_trip_ms_stddev": float, # null if not set
|
||||
|
||||
# below object only exists if using -qq or ignore_exceptions=True
|
||||
"_jc_meta": {
|
||||
@@ -74,6 +76,7 @@ Examples:
|
||||
{"type":"reply","destination_ip":"1.1.1.1","sent_bytes":"56","patte...}
|
||||
...
|
||||
"""
|
||||
import re
|
||||
import string
|
||||
import ipaddress
|
||||
import jc.utils
|
||||
@@ -85,7 +88,7 @@ from jc.exceptions import ParseError
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.3'
|
||||
version = '1.4'
|
||||
description = '`ping` and `ping6` command streaming parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -110,13 +113,14 @@ def _process(proc_data):
|
||||
Dictionary. Structured data to conform to the schema.
|
||||
"""
|
||||
int_list = {
|
||||
'sent_bytes', 'packets_transmitted', 'packets_received', 'response_bytes', 'icmp_seq',
|
||||
'ttl', 'duplicates', 'vr', 'hl', 'tos', 'len', 'id', 'flg', 'off', 'pro', 'cks'
|
||||
'sent_bytes', 'packets_transmitted', 'packets_received',
|
||||
'response_bytes', 'icmp_seq', 'ttl', 'duplicates', 'vr', 'hl', 'tos',
|
||||
'len', 'id', 'flg', 'off', 'pro', 'cks', 'errors', 'corrupted'
|
||||
}
|
||||
|
||||
float_list = {
|
||||
'packet_loss_percent', 'round_trip_ms_min', 'round_trip_ms_avg', 'round_trip_ms_max',
|
||||
'round_trip_ms_stddev', 'timestamp', 'time_ms'
|
||||
'packet_loss_percent', 'round_trip_ms_min', 'round_trip_ms_avg',
|
||||
'round_trip_ms_max', 'round_trip_ms_stddev', 'timestamp', 'time_ms'
|
||||
}
|
||||
|
||||
for key in proc_data:
|
||||
@@ -144,6 +148,12 @@ class _state:
|
||||
packet_loss_percent = None
|
||||
time_ms = None
|
||||
duplicates = None
|
||||
corrupted = None
|
||||
errors = None
|
||||
round_trip_ms_min = None
|
||||
round_trip_ms_avg = None
|
||||
round_trip_ms_max = None
|
||||
round_trip_ms_stddev = None
|
||||
|
||||
|
||||
def _ipv6_in(line):
|
||||
@@ -369,24 +379,44 @@ def _linux_parse(line, s):
|
||||
return None
|
||||
|
||||
if s.footer:
|
||||
if 'packets transmitted' in line:
|
||||
if ' duplicates,' in line:
|
||||
s.packets_transmitted = line.split()[0]
|
||||
s.packets_received = line.split()[3]
|
||||
s.packet_loss_percent = line.split()[7].rstrip('%')
|
||||
s.duplicates = line.split()[5].lstrip('+')
|
||||
s.time_ms = line.split()[11].replace('ms', '')
|
||||
return None
|
||||
#
|
||||
# See: https://github.com/dgibson/iputils/blob/master/ping_common.c#L995
|
||||
#
|
||||
m = re.search(r'(\d+) packets transmitted', line)
|
||||
if m:
|
||||
s.packets_transmitted = m.group(1)
|
||||
|
||||
s.packets_transmitted = line.split()[0]
|
||||
s.packets_received = line.split()[3]
|
||||
s.packet_loss_percent = line.split()[5].rstrip('%')
|
||||
s.duplicates = '0'
|
||||
s.time_ms = line.split()[9].replace('ms', '')
|
||||
return None
|
||||
m = re.search(r'(\d+) received,', line)
|
||||
if m:
|
||||
s.packets_received = m.group(1)
|
||||
|
||||
m = re.search(r'[+](\d+) duplicates', line)
|
||||
if m:
|
||||
s.duplicates = m.group(1)
|
||||
|
||||
m = re.search(r'[+](\d+) corrupted', line)
|
||||
if m:
|
||||
s.corrupted = m.group(1)
|
||||
|
||||
m = re.search(r'[+](\d+) errors', line)
|
||||
if m:
|
||||
s.errors = m.group(1)
|
||||
|
||||
m = re.search(r'([\d\.]+)% packet loss', line)
|
||||
if m:
|
||||
s.packet_loss_percent = m.group(1)
|
||||
|
||||
m = re.search(r'time (\d+)ms', line)
|
||||
if m:
|
||||
s.time_ms = m.group(1)
|
||||
|
||||
m = re.search(r'rtt min\/avg\/max\/mdev += +([\d\.]+)\/([\d\.]+)\/([\d\.]+)\/([\d\.]+) ms', line)
|
||||
if m:
|
||||
s.round_trip_ms_min = m.group(1)
|
||||
s.round_trip_ms_avg = m.group(2)
|
||||
s.round_trip_ms_max = m.group(3)
|
||||
s.round_trip_ms_stddev = m.group(4)
|
||||
|
||||
split_line = line.split(' = ')[1]
|
||||
split_line = split_line.split('/')
|
||||
output_line = {
|
||||
'type': 'summary',
|
||||
'destination_ip': s.destination_ip or None,
|
||||
@@ -394,15 +424,16 @@ def _linux_parse(line, s):
|
||||
'pattern': s.pattern or None,
|
||||
'packets_transmitted': s.packets_transmitted or None,
|
||||
'packets_received': s.packets_received or None,
|
||||
'packet_loss_percent': s.packet_loss_percent or None,
|
||||
'duplicates': s.duplicates or None,
|
||||
'time_ms': s.time_ms or None,
|
||||
'round_trip_ms_min': split_line[0],
|
||||
'round_trip_ms_avg': split_line[1],
|
||||
'round_trip_ms_max': split_line[2],
|
||||
'round_trip_ms_stddev': split_line[3].split()[0]
|
||||
'packet_loss_percent': s.packet_loss_percent,
|
||||
'duplicates': s.duplicates or '0',
|
||||
'errors': s.errors,
|
||||
'corrupted': s.corrupted,
|
||||
'time_ms': s.time_ms,
|
||||
'round_trip_ms_min': s.round_trip_ms_min,
|
||||
'round_trip_ms_avg': s.round_trip_ms_avg,
|
||||
'round_trip_ms_max': s.round_trip_ms_max,
|
||||
'round_trip_ms_stddev': s.round_trip_ms_stddev
|
||||
}
|
||||
|
||||
return output_line
|
||||
|
||||
# ping response lines
|
||||
@@ -488,6 +519,7 @@ def parse(data, raw=False, quiet=False, ignore_exceptions=False):
|
||||
streaming_input_type_check(data)
|
||||
|
||||
s = _state()
|
||||
summary_obj = {}
|
||||
|
||||
for line in data:
|
||||
try:
|
||||
@@ -528,6 +560,12 @@ def parse(data, raw=False, quiet=False, ignore_exceptions=False):
|
||||
if s.os_detected and s.linux:
|
||||
output_line = _linux_parse(line, s)
|
||||
|
||||
# summary can be multiple lines so don't output until the end
|
||||
if output_line:
|
||||
if output_line.get('type', None) == 'summary':
|
||||
summary_obj = output_line
|
||||
continue
|
||||
|
||||
elif s.os_detected and s.bsd:
|
||||
output_line = _bsd_parse(line, s)
|
||||
|
||||
@@ -542,3 +580,10 @@ def parse(data, raw=False, quiet=False, ignore_exceptions=False):
|
||||
|
||||
except Exception as e:
|
||||
yield raise_or_yield(ignore_exceptions, e, line)
|
||||
|
||||
# yield summary, if it exists
|
||||
try:
|
||||
if summary_obj:
|
||||
yield summary_obj if raw else _process(summary_obj)
|
||||
except Exception as e:
|
||||
yield raise_or_yield(ignore_exceptions, e, str(summary_obj))
|
||||
|
||||
220
jc/parsers/pkg_index_apk.py
Normal file
220
jc/parsers/pkg_index_apk.py
Normal file
@@ -0,0 +1,220 @@
|
||||
"""jc - JSON Convert Alpine Linux Package Index files
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat APKINDEX | jc --pkg-index-apk
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('pkg_index_apk', pkg_index_apk_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"checksum": string,
|
||||
"package": string,
|
||||
"version": string,
|
||||
"architecture": string,
|
||||
"package_size": integer,
|
||||
"installed_size": integer,
|
||||
"description": string,
|
||||
"url": string,
|
||||
"license": string,
|
||||
"origin": string,
|
||||
"maintainer": {
|
||||
"name": string,
|
||||
"email": string,
|
||||
},
|
||||
"build_time": integer,
|
||||
"commit": string,
|
||||
"provider_priority": string,
|
||||
"dependencies": [
|
||||
string
|
||||
],
|
||||
"provides": [
|
||||
string
|
||||
],
|
||||
"install_if": [
|
||||
string
|
||||
],
|
||||
}
|
||||
]
|
||||
|
||||
Example:
|
||||
|
||||
$ cat APKINDEX | jc --pkg-index-apk
|
||||
[
|
||||
{
|
||||
"checksum": "Q1znBl9k+RKgY6gl5Eg3iz73KZbLY=",
|
||||
"package": "yasm",
|
||||
"version": "1.3.0-r4",
|
||||
"architecture": "x86_64",
|
||||
"package_size": 772109,
|
||||
"installed_size": 1753088,
|
||||
"description": "A rewrite of NASM to allow for multiple synta...",
|
||||
"url": "http://www.tortall.net/projects/yasm/",
|
||||
"license": "BSD-2-Clause",
|
||||
"origin": "yasm",
|
||||
"maintainer": {
|
||||
"name": "Natanael Copa",
|
||||
"email": "ncopa@alpinelinux.org"
|
||||
},
|
||||
"build_time": 1681228881,
|
||||
"commit": "84a227baf001b6e0208e3352b294e4d7a40e93de",
|
||||
"dependencies": [
|
||||
"so:libc.musl-x86_64.so.1"
|
||||
],
|
||||
"provides": [
|
||||
"cmd:vsyasm=1.3.0-r4",
|
||||
"cmd:yasm=1.3.0-r4",
|
||||
"cmd:ytasm=1.3.0-r4"
|
||||
]
|
||||
}
|
||||
]
|
||||
|
||||
$ cat APKINDEX | jc --pkg-index-apk --raw
|
||||
[
|
||||
{
|
||||
"C": "Q1znBl9k+RKgY6gl5Eg3iz73KZbLY=",
|
||||
"P": "yasm",
|
||||
"V": "1.3.0-r4",
|
||||
"A": "x86_64",
|
||||
"S": "772109",
|
||||
"I": "1753088",
|
||||
"T": "A rewrite of NASM to allow for multiple syntax supported...",
|
||||
"U": "http://www.tortall.net/projects/yasm/",
|
||||
"L": "BSD-2-Clause",
|
||||
"o": "yasm",
|
||||
"m": "Natanael Copa <ncopa@alpinelinux.org>",
|
||||
"t": "1681228881",
|
||||
"c": "84a227baf001b6e0208e3352b294e4d7a40e93de",
|
||||
"D": "so:libc.musl-x86_64.so.1",
|
||||
"p": "cmd:vsyasm=1.3.0-r4 cmd:yasm=1.3.0-r4 cmd:ytasm=1.3.0-r4"
|
||||
}
|
||||
]
|
||||
"""
|
||||
import re
from typing import List, Dict, Union
import jc.utils
from jc.exceptions import ParseError
|
||||
|
||||
|
||||
class info:
    """Provides parser metadata (version, author, etc.)"""
    version = "1.0"
    description = "Alpine Linux Package Index file parser"
    author = "Roey Darwish Dror"
    author_email = "roey.ghost@gmail.com"
    # APKINDEX is a plain-text format, so the parser works on any platform
    compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
    tags = ['standard', 'file', 'string']


__version__ = info.version
|
||||
|
||||
|
||||
# Map of single-letter APKINDEX field codes to descriptive schema key names.
# Unknown codes are passed through unchanged by _process().
_KEY = {
    "C": "checksum",
    "P": "package",
    "V": "version",
    "A": "architecture",
    "S": "package_size",
    "I": "installed_size",
    "T": "description",
    "U": "url",
    "L": "license",
    "o": "origin",
    "m": "maintainer",
    "t": "build_time",
    "c": "commit",
    "k": "provider_priority",
    "D": "dependencies",
    "p": "provides",
    "i": "install_if"
}
|
||||
|
||||
def _value(key: str, value: str) -> Union[str, int, List[str], Dict[str, str]]:
|
||||
"""
|
||||
Convert value to the appropriate type
|
||||
|
||||
Parameters:
|
||||
|
||||
key: (string) key name
|
||||
value: (string) value to convert
|
||||
|
||||
Returns:
|
||||
|
||||
Converted value
|
||||
"""
|
||||
if key in ['S', 'I', 't', 'k']:
|
||||
return int(value)
|
||||
|
||||
if key in ['D', 'p', 'i']:
|
||||
splitted = value.split(' ')
|
||||
return splitted
|
||||
|
||||
if key == "m":
|
||||
m = re.match(r'(.*) <(.*)>', value)
|
||||
if m:
|
||||
return {'name': m.group(1), 'email': m.group(2)}
|
||||
else:
|
||||
return {'name': value}
|
||||
|
||||
return value
|
||||
|
||||
|
||||
def _process(proc_data: List[Dict]) -> List[Dict]:
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data: (List of Dictionaries) raw structured data to process

    Returns:

        List of Dictionaries. Structured to conform to the schema.
    """
    processed = []

    for package in proc_data:
        converted = {}
        # rename each single-letter field code and convert its value type
        for key, val in package.items():
            converted[_KEY.get(key, key)] = _value(key, val)
        processed.append(converted)

    return processed
|
||||
|
||||
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> List[Dict]:
    """
    Main text parsing function

    Parameters:

        data:        (string) text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of Dictionaries. Raw or processed structured data.

    Raises:

        ParseError: if a package stanza contains the same field code twice
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output: List[dict] = []
    package: Dict = {}

    if jc.utils.has_data(data):
        for line in data.splitlines():
            line = line.strip()

            # a blank line terminates the current package stanza
            if not line:
                if package:
                    raw_output.append(package)
                    package = {}

                continue

            # each line is "<code>:<value>" with a single-letter code
            key = line[0]
            value = line[2:].strip()

            # a duplicate field code indicates corrupt input; was previously a
            # bare `assert`, which is silently skipped under `python -O` and
            # raises AssertionError instead of the project's ParseError
            if key in package:
                raise ParseError(f'Duplicate key "{key}" in package entry')

            package[key] = value

        # flush the final stanza (file may not end with a blank line)
        if package:
            raw_output.append(package)

    return raw_output if raw else _process(raw_output)
|
||||
148
jc/parsers/pkg_index_deb.py
Normal file
148
jc/parsers/pkg_index_deb.py
Normal file
@@ -0,0 +1,148 @@
|
||||
"""jc - JSON Convert Debian Package Index file parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat Packages | jc --pkg-index-deb
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('pkg_index_deb', pkg_index_deb_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"package": string,
|
||||
"version": string,
|
||||
"architecture": string,
|
||||
"section": string,
|
||||
"priority": string,
|
||||
"installed_size": integer,
|
||||
"maintainer": string,
|
||||
"description": string,
|
||||
"homepage": string,
|
||||
"depends": string,
|
||||
"conflicts": string,
|
||||
"replaces": string,
|
||||
"vcs_git": string,
|
||||
"sha256": string,
|
||||
"size": integer,
|
||||
"vcs_git": string,
|
||||
"filename": string
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat Packages | jc --pkg-index-deb
|
||||
[
|
||||
{
|
||||
"package": "aspnetcore-runtime-2.1",
|
||||
"version": "2.1.22-1",
|
||||
"architecture": "amd64",
|
||||
"section": "devel",
|
||||
"priority": "standard",
|
||||
"installed_size": 71081,
|
||||
"maintainer": "Microsoft <nugetaspnet@microsoft.com>",
|
||||
"description": "Microsoft ASP.NET Core 2.1.22 Shared Framework",
|
||||
"homepage": "https://www.asp.net/",
|
||||
"depends": "libc6 (>= 2.14), dotnet-runtime-2.1 (>= 2.1.22)",
|
||||
"sha256": "48d4e78a7ceff34105411172f4c3e91a0359b3929d84d26a493...",
|
||||
"size": 21937036,
|
||||
"filename": "pool/main/a/aspnetcore-runtime-2.1/aspnetcore-run..."
|
||||
},
|
||||
{
|
||||
"package": "azure-functions-core-tools-4",
|
||||
"version": "4.0.4590-1",
|
||||
"architecture": "amd64",
|
||||
"section": "devel",
|
||||
"priority": "optional",
|
||||
"maintainer": "Ahmed ElSayed <ahmels@microsoft.com>",
|
||||
"description": "Azure Function Core Tools v4",
|
||||
"homepage": "https://docs.microsoft.com/en-us/azure/azure-func...",
|
||||
"conflicts": "azure-functions-core-tools-2, azure-functions-co...",
|
||||
"replaces": "azure-functions-core-tools-2, azure-functions-cor...",
|
||||
"vcs_git": "https://github.com/Azure/azure-functions-core-tool...",
|
||||
"sha256": "a2a4f99d6d98ba0a46832570285552f2a93bab06cebbda2afc7...",
|
||||
"size": 124417844,
|
||||
"filename": "pool/main/a/azure-functions-core-tools-4/azure-fu..."
|
||||
}
|
||||
]
|
||||
|
||||
$ cat Packages | jc --pkg-index-deb -r
|
||||
[
|
||||
{
|
||||
"package": "aspnetcore-runtime-2.1",
|
||||
"version": "2.1.22-1",
|
||||
"architecture": "amd64",
|
||||
"section": "devel",
|
||||
"priority": "standard",
|
||||
"installed_size": "71081",
|
||||
"maintainer": "Microsoft <nugetaspnet@microsoft.com>",
|
||||
"description": "Microsoft ASP.NET Core 2.1.22 Shared Framework",
|
||||
"homepage": "https://www.asp.net/",
|
||||
"depends": "libc6 (>= 2.14), dotnet-runtime-2.1 (>= 2.1.22)",
|
||||
"sha256": "48d4e78a7ceff34105411172f4c3e91a0359b3929d84d26a493...",
|
||||
"size": "21937036",
|
||||
"filename": "pool/main/a/aspnetcore-runtime-2.1/aspnetcore-run..."
|
||||
},
|
||||
{
|
||||
"package": "azure-functions-core-tools-4",
|
||||
"version": "4.0.4590-1",
|
||||
"architecture": "amd64",
|
||||
"section": "devel",
|
||||
"priority": "optional",
|
||||
"maintainer": "Ahmed ElSayed <ahmels@microsoft.com>",
|
||||
"description": "Azure Function Core Tools v4",
|
||||
"homepage": "https://docs.microsoft.com/en-us/azure/azure-func...",
|
||||
"conflicts": "azure-functions-core-tools-2, azure-functions-co...",
|
||||
"replaces": "azure-functions-core-tools-2, azure-functions-cor...",
|
||||
"vcs_git": "https://github.com/Azure/azure-functions-core-tool...",
|
||||
"sha256": "a2a4f99d6d98ba0a46832570285552f2a93bab06cebbda2afc7...",
|
||||
"size": "124417844",
|
||||
"filename": "pool/main/a/azure-functions-core-tools-4/azure-fu..."
|
||||
}
|
||||
]
|
||||
"""
|
||||
from typing import List
|
||||
from jc.jc_types import JSONDictType
|
||||
import jc.parsers.rpm_qi as rpm_qi
|
||||
|
||||
|
||||
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = 'Debian Package Index file parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    # this parser delegates to rpm_qi, which handles the same key/value stanza format
    details = 'Using the rpm-qi parser'
    compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
    tags = ['file']


__version__ = info.version
|
||||
|
||||
|
||||
def parse(
    data: str,
    raw: bool = False,
    quiet: bool = False
) -> List[JSONDictType]:
    """
    Main text parsing function

    Parameters:

        data:        (string) text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of Dictionaries. Raw or processed structured data.
    """
    # This parser is an alias of rpm_qi.py: override that parser's metadata
    # so its compatibility check and tags reflect this parser instead.
    rpm_qi.info.tags = ['file']
    rpm_qi.info.compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']

    return rpm_qi.parse(data, raw, quiet)
|
||||
@@ -120,7 +120,7 @@ from jc.exceptions import ParseError
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.1'
|
||||
version = '1.2'
|
||||
description = '`/proc/` file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -154,6 +154,7 @@ def parse(
|
||||
if jc.utils.has_data(data):
|
||||
# signatures
|
||||
buddyinfo_p = re.compile(r'^Node \d+, zone\s+\w+\s+(?:\d+\s+){11}\n')
|
||||
cmdline_p = re.compile(r'^BOOT_IMAGE=')
|
||||
consoles_p = re.compile(r'^\w+\s+[\-WUR]{3} \([ECBpba ]+\)\s+\d+:\d+\n')
|
||||
cpuinfo_p = re.compile(r'^processor\t+: \d+.*bogomips\t+: \d+.\d\d\n', re.DOTALL)
|
||||
crypto_p = re.compile(r'^name\s+:.*\ndriver\s+:.*\nmodule\s+:.*\n')
|
||||
@@ -194,6 +195,7 @@ def parse(
|
||||
net_packet_p = re.compile(r'^sk RefCnt Type Proto Iface R Rmem User Inode\n')
|
||||
net_protocols_p = re.compile(r'^protocol size sockets memory press maxhdr slab module cl co di ac io in de sh ss gs se re sp bi br ha uh gp em\n')
|
||||
net_route_p = re.compile(r'^Iface\tDestination\tGateway \tFlags\tRefCnt\tUse\tMetric\tMask\t\tMTU\tWindow\tIRTT\s+\n')
|
||||
net_tcp_p = re.compile(r'^\s+sl\s+local_address\s+(?:rem_address|remote_address)\s+st\s+tx_queue\s+rx_queue\s+tr\s+tm->when\s+retrnsmt\s+uid\s+timeout\s+inode')
|
||||
net_unix_p = re.compile(r'^Num RefCount Protocol Flags Type St Inode Path\n')
|
||||
|
||||
pid_fdinfo_p = re.compile(r'^pos:\t\d+\nflags:\t\d+\nmnt_id:\t\d+\n')
|
||||
@@ -211,6 +213,7 @@ def parse(
|
||||
|
||||
procmap = {
|
||||
buddyinfo_p: 'proc_buddyinfo',
|
||||
cmdline_p: 'proc_cmdline',
|
||||
consoles_p: 'proc_consoles',
|
||||
cpuinfo_p: 'proc_cpuinfo',
|
||||
crypto_p: 'proc_crypto',
|
||||
@@ -249,6 +252,7 @@ def parse(
|
||||
net_packet_p: 'proc_net_packet',
|
||||
net_protocols_p: 'proc_net_protocols',
|
||||
net_route_p: 'proc_net_route',
|
||||
net_tcp_p: 'proc_net_tcp',
|
||||
net_unix_p: 'proc_net_unix',
|
||||
net_ipv6_route_p: 'proc_net_ipv6_route', # before net_dev_mcast
|
||||
net_dev_mcast_p: 'proc_net_dev_mcast', # after net_ipv6_route
|
||||
|
||||
138
jc/parsers/proc_cmdline.py
Normal file
138
jc/parsers/proc_cmdline.py
Normal file
@@ -0,0 +1,138 @@
|
||||
"""jc - JSON Convert `/proc/cmdline` file parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat /proc/cmdline | jc --proc
|
||||
|
||||
or
|
||||
|
||||
$ jc /proc/cmdline
|
||||
|
||||
or
|
||||
|
||||
$ cat /proc/cmdline | jc --proc-cmdline
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('proc_cmdline', proc_cmdline_file)
|
||||
|
||||
Schema:
|
||||
|
||||
{
|
||||
"<key>": string,
|
||||
"_options": [
|
||||
string
|
||||
]
|
||||
}
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /proc/cmdline | jc --proc -p
|
||||
{
|
||||
"BOOT_IMAGE": "clonezilla/live/vmlinuz",
|
||||
"consoleblank": "0",
|
||||
"keyboard-options": "grp:ctrl_shift_toggle,lctrl_shift_toggle",
|
||||
"ethdevice-timeout": "130",
|
||||
"toram": "filesystem.squashfs",
|
||||
"boot": "live",
|
||||
"edd": "on",
|
||||
"ocs_daemonon": "ssh lighttpd",
|
||||
"ocs_live_run": "sudo screen /usr/sbin/ocs-sr -g auto -e1 auto -e2 -batch -r -j2 -k -scr -p true restoreparts win7-64 sda1",
|
||||
"ocs_live_extra_param": "",
|
||||
"keyboard-layouts": "us,ru",
|
||||
"ocs_live_batch": "no",
|
||||
"locales": "ru_RU.UTF-8",
|
||||
"vga": "788",
|
||||
"net.ifnames": "0",
|
||||
"union": "overlay",
|
||||
"fetch": "http://10.1.1.1/tftpboot/clonezilla/live/filesystem.squashfs",
|
||||
"ocs_postrun99": "sudo reboot",
|
||||
"initrd": "clonezilla/live/initrd.img",
|
||||
"_options": [
|
||||
"config",
|
||||
"noswap",
|
||||
"nolocales",
|
||||
"nomodeset",
|
||||
"noprompt",
|
||||
"nosplash",
|
||||
"nodmraid",
|
||||
"components"
|
||||
]
|
||||
}
|
||||
"""
|
||||
import shlex
|
||||
from typing import List, Dict
|
||||
from jc.jc_types import JSONDictType
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = '`/proc/cmdline` file parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    compatible = ['linux']
    tags = ['file']
    # hidden: not listed in parser listings; reached via the `--proc` dispatcher
    hidden = True


__version__ = info.version
|
||||
|
||||
|
||||
def _process(proc_data: JSONDictType) -> JSONDictType:
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data: (Dictionary) raw structured data to process

    Returns:

        Dictionary. Structured to conform to the schema.
    """
    # no type conversions are needed for this parser; raw and processed
    # output are identical
    return proc_data
|
||||
|
||||
|
||||
def parse(
    data: str,
    raw: bool = False,
    quiet: bool = False
) -> JSONDictType:
    """
    Main text parsing function

    Parameters:

        data:        (string) text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        Dictionary. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output: Dict = {}
    options: List = []

    if jc.utils.has_data(data):

        # shlex.split honors shell-style quoting, e.g. param="some value"
        for token in shlex.split(data):
            key, sep, val = token.partition('=')

            if sep:
                # key=value boot parameter
                raw_output[key] = val
            else:
                # bare flag with no value
                options.append(token)

        if options:
            raw_output['_options'] = options

    return raw_output if raw else _process(raw_output)
|
||||
293
jc/parsers/proc_net_tcp.py
Normal file
293
jc/parsers/proc_net_tcp.py
Normal file
@@ -0,0 +1,293 @@
|
||||
"""jc - JSON Convert `/proc/net/tcp` and `proc/net/tcp6` file parser
|
||||
|
||||
IPv4 and IPv6 addresses are converted to standard notation unless the raw
|
||||
(--raw) option is used.
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat /proc/net/tcp | jc --proc
|
||||
|
||||
or
|
||||
|
||||
$ jc /proc/net/tcp
|
||||
|
||||
or
|
||||
|
||||
$ cat /proc/net/tcp | jc --proc-net-tcp
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('proc', proc_net_tcp_file)
|
||||
|
||||
or
|
||||
|
||||
import jc
|
||||
result = jc.parse('proc_net_tcp', proc_net_tcp_file)
|
||||
|
||||
Schema:
|
||||
|
||||
Field names and types gathered from the following:
|
||||
|
||||
https://www.kernel.org/doc/Documentation/networking/proc_net_tcp.txt
|
||||
|
||||
https://github.com/torvalds/linux/blob/master/net/ipv4/tcp_ipv4.c
|
||||
|
||||
https://github.com/torvalds/linux/blob/master/net/ipv6/tcp_ipv6.c
|
||||
|
||||
[
|
||||
{
|
||||
"entry": integer,
|
||||
"local_address": string,
|
||||
"local_port": integer,
|
||||
"remote_address": string,
|
||||
"remote_port": integer,
|
||||
"state": string,
|
||||
"tx_queue": string,
|
||||
"rx_queue": string,
|
||||
"timer_active": integer,
|
||||
"jiffies_until_timer_expires": string,
|
||||
"unrecovered_rto_timeouts": string,
|
||||
"uid": integer,
|
||||
"unanswered_0_window_probes": integer,
|
||||
"inode": integer,
|
||||
"sock_ref_count": integer,
|
||||
"sock_mem_loc": string,
|
||||
"retransmit_timeout": integer,
|
||||
"soft_clock_tick": integer,
|
||||
"ack_quick_pingpong": integer,
|
||||
"sending_congestion_window": integer,
|
||||
"slow_start_size_threshold": integer
|
||||
}
|
||||
]
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /proc/net/tcp | jc --proc -p
|
||||
[
|
||||
{
|
||||
"entry": "0",
|
||||
"local_address": "10.0.0.28",
|
||||
"local_port": 42082,
|
||||
"remote_address": "64.12.0.108",
|
||||
"remote_port": 80,
|
||||
"state": "04",
|
||||
"tx_queue": "00000001",
|
||||
"rx_queue": "00000000",
|
||||
"timer_active": 1,
|
||||
"jiffies_until_timer_expires": "00000015",
|
||||
"unrecovered_rto_timeouts": "00000000",
|
||||
"uid": 0,
|
||||
"unanswered_0_window_probes": 0,
|
||||
"inode": 0,
|
||||
"sock_ref_count": 3,
|
||||
"sock_mem_loc": "ffff8c7a0de930c0",
|
||||
"retransmit_timeout": 21,
|
||||
"soft_clock_tick": 4,
|
||||
"ack_quick_pingpong": 30,
|
||||
"sending_congestion_window": 10,
|
||||
"slow_start_size_threshold": -1
|
||||
},
|
||||
{
|
||||
"entry": "1",
|
||||
"local_address": "10.0.0.28",
|
||||
"local_port": 38864,
|
||||
"remote_address": "104.244.42.65",
|
||||
"remote_port": 80,
|
||||
"state": "06",
|
||||
"tx_queue": "00000000",
|
||||
"rx_queue": "00000000",
|
||||
"timer_active": 3,
|
||||
"jiffies_until_timer_expires": "000007C5",
|
||||
"unrecovered_rto_timeouts": "00000000",
|
||||
"uid": 0,
|
||||
"unanswered_0_window_probes": 0,
|
||||
"inode": 0,
|
||||
"sock_ref_count": 3,
|
||||
"sock_mem_loc": "ffff8c7a12d31aa0"
|
||||
},
|
||||
...
|
||||
]
|
||||
|
||||
$ cat /proc/net/tcp | jc --proc -p -r
|
||||
[
|
||||
{
|
||||
"entry": "1",
|
||||
"local_address": "1C00000A",
|
||||
"local_port": "A462",
|
||||
"remote_address": "6C000C40",
|
||||
"remote_port": "0050",
|
||||
"state": "04",
|
||||
"tx_queue": "00000001",
|
||||
"rx_queue": "00000000",
|
||||
"timer_active": "01",
|
||||
"jiffies_until_timer_expires": "00000015",
|
||||
"unrecovered_rto_timeouts": "00000000",
|
||||
"uid": "0",
|
||||
"unanswered_0_window_probes": "0",
|
||||
"inode": "0",
|
||||
"sock_ref_count": "3",
|
||||
"sock_mem_loc": "ffff8c7a0de930c0",
|
||||
"retransmit_timeout": "21",
|
||||
"soft_clock_tick": "4",
|
||||
"ack_quick_pingpong": "30",
|
||||
"sending_congestion_window": "10",
|
||||
"slow_start_size_threshold": "-1"
|
||||
},
|
||||
{
|
||||
"entry": "2",
|
||||
"local_address": "1C00000A",
|
||||
"local_port": "97D0",
|
||||
"remote_address": "412AF468",
|
||||
"remote_port": "0050",
|
||||
"state": "06",
|
||||
"tx_queue": "00000000",
|
||||
"rx_queue": "00000000",
|
||||
"timer_active": "03",
|
||||
"jiffies_until_timer_expires": "000007C5",
|
||||
"unrecovered_rto_timeouts": "00000000",
|
||||
"uid": "0",
|
||||
"unanswered_0_window_probes": "0",
|
||||
"inode": "0",
|
||||
"sock_ref_count": "3",
|
||||
"sock_mem_loc": "ffff8c7a12d31aa0"
|
||||
},
|
||||
...
|
||||
]
|
||||
"""
|
||||
import binascii
|
||||
import socket
|
||||
import struct
|
||||
from typing import List, Dict
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = '`/proc/net/tcp` and `/proc/net/tcp6` file parser'
    author = 'Alvin Solomon'
    author_email = 'alvinms01@gmail.com'
    compatible = ['linux']
    tags = ['file']
    # hidden: not listed in parser listings; reached via the `--proc` dispatcher
    hidden = True


__version__ = info.version
|
||||
|
||||
|
||||
def hex_to_ip(hexaddr: str) -> str:
    """
    Convert a hex-encoded address from `/proc/net/tcp[6]` to standard
    notation.

    8 hex digits are treated as an IPv4 address and 32 hex digits as an
    IPv6 address; any other length yields an empty string.

    NOTE(review): both branches interpret the words in the byte order of a
    little-endian host - confirm behavior on big-endian platforms.
    """
    length = len(hexaddr)

    # IPv4: a single 32-bit word, unpacked as little-endian
    if length == 8:
        packed = struct.pack('<L', int(hexaddr, 16))
        return socket.inet_ntop(socket.AF_INET, packed)

    # IPv6: four 32-bit words; each word is byte-swapped to native order
    if length == 32:
        words = struct.unpack('>IIII', binascii.a2b_hex(hexaddr))
        return socket.inet_ntop(socket.AF_INET6, struct.pack('@IIII', *words))

    return ''
|
||||
|
||||
|
||||
def _process(proc_data: List[Dict]) -> List[Dict]:
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (List of Dictionaries) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Structured to conform to the schema.
|
||||
"""
|
||||
int_list = {
|
||||
'timer_active', 'uid', 'unanswered_0_window_probes', 'inode',
|
||||
'sock_ref_count', 'retransmit_timeout', 'soft_clock_tick',
|
||||
'ack_quick_pingpong', 'sending_congestion_window',
|
||||
'slow_start_size_threshold'
|
||||
}
|
||||
|
||||
for entry in proc_data:
|
||||
if 'local_address' in entry:
|
||||
entry['local_address'] = hex_to_ip(entry['local_address'])
|
||||
entry['local_port'] = int(entry['local_port'], 16)
|
||||
entry['remote_address'] = hex_to_ip(entry['remote_address'])
|
||||
entry['remote_port'] = int(entry['remote_port'], 16)
|
||||
|
||||
for item in int_list:
|
||||
if item in entry:
|
||||
entry[item] = jc.utils.convert_to_int(entry[item])
|
||||
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(
    data: str,
    raw: bool = False,
    quiet: bool = False
) -> List[Dict]:
    """
    Main text parsing function

    Parses the whitespace-delimited table from `/proc/net/tcp` or
    `/proc/net/tcp6` (the header row is discarded). Addresses and ports
    are kept as the kernel's hex strings here; `_process()` converts them.

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of Dictionaries. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output: List = []

    if jc.utils.has_data(data):

        # first line is the column-header row; skip it
        line_data = data.splitlines()[1:]

        for entry in line_data:
            line = entry.split()
            output_line = {}
            # first field is the entry number with a trailing colon, e.g. "0:"
            output_line['entry'] = line[0][:-1]

            # socket endpoints are "HEXADDR:HEXPORT"
            local_ip_port = line[1]
            local_ip = local_ip_port.split(':')[0]
            local_port = local_ip_port.split(':')[1]

            output_line['local_address'] = local_ip
            output_line['local_port'] = local_port

            remote_ip_port = line[2]
            remote_ip = remote_ip_port.split(':')[0]
            remote_port = remote_ip_port.split(':')[1]

            output_line['remote_address'] = remote_ip
            output_line['remote_port'] = remote_port

            output_line['state'] = line[3]
            # field 4 is "XXXXXXXX:YYYYYYYY" — fixed 8-hex-digit tx:rx queues
            output_line['tx_queue'] = line[4][:8]
            output_line['rx_queue'] = line[4][9:]
            # field 5 is "TT:JJJJJJJJ" — 2-hex-digit timer code, then jiffies
            output_line['timer_active'] = line[5][:2]
            output_line['jiffies_until_timer_expires'] = line[5][3:]
            output_line['unrecovered_rto_timeouts'] = line[6]
            output_line['uid'] = line[7]
            output_line['unanswered_0_window_probes'] = line[8]
            output_line['inode'] = line[9]
            output_line['sock_ref_count'] = line[10]
            output_line['sock_mem_loc'] = line[11]

            # fields not always included
            # (present on some rows only — see the docstring examples where
            # entry 1 has them and entry 2 does not)
            if len(line) > 12:
                output_line['retransmit_timeout'] = line[12]
                output_line['soft_clock_tick'] = line[13]
                output_line['ack_quick_pingpong'] = line[14]
                output_line['sending_congestion_window'] = line[15]
                output_line['slow_start_size_threshold'] = line[16]

            raw_output.append(output_line)

    return raw_output if raw else _process(raw_output)
|
||||
171
jc/parsers/resolve_conf.py
Normal file
171
jc/parsers/resolve_conf.py
Normal file
@@ -0,0 +1,171 @@
|
||||
"""jc - JSON Convert `/etc/resolve.conf` file parser
|
||||
|
||||
This parser may be more forgiving than the system parser. For example, if
|
||||
multiple `search` lists are defined, this parser will append all entries to
|
||||
the `search` field, while the system parser may only use the list from the
|
||||
last defined instance.
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ cat /etc/resolve.conf | jc --resolve-conf
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('resolve_conf', resolve_conf_output)
|
||||
|
||||
Schema:
|
||||
|
||||
{
|
||||
"domain": string,
|
||||
"search": [
|
||||
string
|
||||
],
|
||||
"nameservers": [
|
||||
string
|
||||
],
|
||||
"options": [
|
||||
string
|
||||
],
|
||||
"sortlist": [
|
||||
string
|
||||
]
|
||||
}
|
||||
|
||||
|
||||
Examples:
|
||||
|
||||
$ cat /etc/resolve.conf | jc --resolve-conf -p
|
||||
{
|
||||
"search": [
|
||||
"eng.myprime.com",
|
||||
"dev.eng.myprime.com",
|
||||
"labs.myprime.com",
|
||||
"qa.myprime.com"
|
||||
],
|
||||
"nameservers": [
|
||||
"10.136.17.15"
|
||||
],
|
||||
"options": [
|
||||
"rotate",
|
||||
"ndots:1"
|
||||
]
|
||||
}
|
||||
"""
|
||||
import re
|
||||
from typing import List, Dict
|
||||
from jc.jc_types import JSONDictType
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = '`/etc/resolve.conf` file parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    # plain-text config file: parseable on any platform
    compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
    tags = ['file']


__version__ = info.version
|
||||
|
||||
|
||||
def _process(proc_data: JSONDictType) -> JSONDictType:
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data: Dictionary raw structured data to process

    Returns:

        Dictionary. Structured to conform to the schema.
    """
    # no conversions needed: every schema field is already a string or a
    # list of strings exactly as produced by parse()
    return proc_data
|
||||
|
||||
|
||||
def parse(
    data: str,
    raw: bool = False,
    quiet: bool = False
) -> JSONDictType:
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        Dictionary. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output: Dict = {}
    search: List[str] = []
    nameservers: List[str] = []
    options: List[str] = []
    sortlist: List[str] = []

    if jc.utils.has_data(data):

        for line in filter(None, data.splitlines()):

            # comments start with # or ; and can be inline: keep only the
            # text before the first delimiter.
            # Fix: previously a config line followed by an *empty* comment
            # (e.g. "nameserver 1.1.1.1 ;") was misidentified as a
            # whole-line comment and silently dropped.
            userdata_str = re.split(r'[#;]', line, maxsplit=1)[0].strip()

            if not userdata_str:    # whole line was a comment or blank
                continue

            if userdata_str.startswith('domain'):
                # Fix: guard keyword-only lines against IndexError
                split_line = userdata_str.split()
                if len(split_line) > 1:
                    raw_output['domain'] = split_line[1].strip()
                continue

            if userdata_str.startswith('search'):
                split_line = userdata_str.split(maxsplit=1)
                if len(split_line) > 1:
                    # append so multiple `search` lines accumulate
                    search.extend(split_line[1].split())
                continue

            if userdata_str.startswith('nameserver'):
                split_line = userdata_str.split()
                if len(split_line) > 1:
                    nameservers.append(split_line[1])
                continue

            if userdata_str.startswith('options'):
                split_line = userdata_str.split(maxsplit=1)
                if len(split_line) > 1:
                    options.extend(split_line[1].split())
                continue

            if userdata_str.startswith('sortlist'):
                split_line = userdata_str.split(maxsplit=1)
                if len(split_line) > 1:
                    sortlist.extend(split_line[1].split())
                continue

    # only emit list fields that were actually populated
    if search:
        raw_output['search'] = search

    if nameservers:
        raw_output['nameservers'] = nameservers

    if options:
        raw_output['options'] = options

    if sortlist:
        raw_output['sortlist'] = sortlist

    return raw_output if raw else _process(raw_output)
|
||||
@@ -214,7 +214,7 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
import jc.parsers.route_windows
|
||||
if cleandata[0] in jc.parsers.route_windows.SEPERATORS:
|
||||
if cleandata[0] in jc.parsers.route_windows.SEPARATORS:
|
||||
raw_output = jc.parsers.route_windows.parse(cleandata)
|
||||
else:
|
||||
cleandata.pop(0) # Removing "Kernel IP routing table".
|
||||
|
||||
@@ -6,7 +6,7 @@
|
||||
import re
|
||||
from typing import List
|
||||
|
||||
SEPERATORS = (
|
||||
SEPARATORS = (
|
||||
"===========================================================================",
|
||||
" None"
|
||||
)
|
||||
@@ -24,7 +24,7 @@ ROUTE_TYPES = ("Active Routes:", "Persistent Routes:")
|
||||
def get_lines_until_seperator(iterator):
|
||||
lines = []
|
||||
for line in iterator:
|
||||
if line in SEPERATORS:
|
||||
if line in SEPARATORS:
|
||||
break
|
||||
lines.append(line)
|
||||
return lines
|
||||
@@ -86,7 +86,7 @@ def parse(cleandata: List[str]):
|
||||
interfaces = []
|
||||
for interface_line in data_iterator:
|
||||
interface_line = interface_line.strip()
|
||||
if interface_line in SEPERATORS:
|
||||
if interface_line in SEPARATORS:
|
||||
break
|
||||
|
||||
interface_match = INTERFACE_REGEX.search(interface_line)
|
||||
|
||||
@@ -161,7 +161,7 @@ import jc.utils
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.6'
|
||||
version = '1.7'
|
||||
description = '`rpm -qi` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -185,7 +185,7 @@ def _process(proc_data):
|
||||
|
||||
List of Dictionaries. Structured data to conform to the schema.
|
||||
"""
|
||||
int_list = {'epoch', 'size'}
|
||||
int_list = {'epoch', 'size', 'installed_size'}
|
||||
|
||||
for entry in proc_data:
|
||||
for key in entry:
|
||||
@@ -234,7 +234,7 @@ def parse(data, raw=False, quiet=False):
|
||||
for line in filter(None, data.splitlines()):
|
||||
split_line = line.split(': ', maxsplit=1)
|
||||
|
||||
if split_line[0].startswith('Name') and len(split_line) == 2:
|
||||
if (split_line[0].startswith('Name') or split_line[0] == 'Package') and len(split_line) == 2:
|
||||
this_entry = split_line[1].strip()
|
||||
|
||||
if this_entry != last_entry:
|
||||
@@ -247,7 +247,7 @@ def parse(data, raw=False, quiet=False):
|
||||
desc_entry = False
|
||||
|
||||
if len(split_line) == 2:
|
||||
entry_obj[split_line[0].strip().lower().replace(' ', '_')] = split_line[1].strip()
|
||||
entry_obj[split_line[0].strip().lower().replace(' ', '_').replace('-', '_')] = split_line[1].strip()
|
||||
|
||||
if line.startswith('Description :'):
|
||||
desc_entry = True
|
||||
|
||||
173
jc/parsers/swapon.py
Normal file
173
jc/parsers/swapon.py
Normal file
@@ -0,0 +1,173 @@
|
||||
"""jc - JSON Convert `swapon` command output parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ swapon | jc --swapon
|
||||
|
||||
or
|
||||
|
||||
$ jc swapon
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('swapon', swapon_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
[
|
||||
{
|
||||
"name": string,
|
||||
"type": string,
|
||||
"size": integer,
|
||||
"used": integer,
|
||||
"priority": integer
|
||||
}
|
||||
]
|
||||
|
||||
Example:
|
||||
|
||||
$ swapon | jc --swapon
|
||||
[
|
||||
{
|
||||
"name": "/swapfile",
|
||||
"type": "file",
|
||||
"size": 1073741824,
|
||||
"used": 0,
|
||||
"priority": -2
|
||||
}
|
||||
]
|
||||
"""
|
||||
from enum import Enum
|
||||
from jc.exceptions import ParseError
|
||||
import jc.utils
|
||||
from typing import List, Dict, Union
|
||||
|
||||
|
||||
class info:
    """Provides parser metadata (version, author, etc.)"""
    version = "1.0"
    description = "`swapon` command parser"
    author = "Roey Darwish Dror"
    author_email = "roey.ghost@gmail.com"
    compatible = ["linux", "freebsd"]
    # allows `jc swapon` magic syntax
    magic_commands = ["swapon"]
    tags = ["command"]


__version__ = info.version

# a parsed cell value (string, or integer after conversion)
_Value = Union[str, int]
# one parsed table row keyed by schema field name
_Entry = Dict[str, _Value]
|
||||
|
||||
|
||||
class _Column(Enum):
|
||||
NAME = "name"
|
||||
TYPE = "type"
|
||||
SIZE = "size"
|
||||
USED = "used"
|
||||
PRIO = "priority"
|
||||
LABEL = "label"
|
||||
UUID = "uuid"
|
||||
|
||||
@classmethod
|
||||
def from_header(cls, header: str) -> "_Column":
|
||||
if (header == "NAME") or (header == "Filename"):
|
||||
return cls.NAME
|
||||
elif (header == "TYPE") or (header == "Type"):
|
||||
return cls.TYPE
|
||||
elif (header == "SIZE") or (header == "Size"):
|
||||
return cls.SIZE
|
||||
elif (header == "USED") or (header == "Used"):
|
||||
return cls.USED
|
||||
elif (header == "PRIO") or (header == "Priority"):
|
||||
return cls.PRIO
|
||||
elif header == "LABEL":
|
||||
return cls.LABEL
|
||||
elif header == "UUID":
|
||||
return cls.UUID
|
||||
else:
|
||||
raise ParseError(f"Unknown header: {header}")
|
||||
|
||||
|
||||
def _parse_size(size: str) -> int:
|
||||
power = None
|
||||
if size[-1] == "B":
|
||||
power = 0
|
||||
if size[-1] == "K":
|
||||
power = 1
|
||||
elif size[-1] == "M":
|
||||
power = 2
|
||||
elif size[-1] == "G":
|
||||
power = 3
|
||||
elif size[-1] == "T":
|
||||
power = 4
|
||||
|
||||
multiplier = 1024**power if power is not None else 1024
|
||||
|
||||
return (int(size[:-1]) if power is not None else int(size)) * multiplier
|
||||
|
||||
|
||||
def _value(value: str, column: _Column) -> _Value:
    """Convert a raw table cell to its schema type for the given column."""
    # SIZE and USED are byte counts; PRIO is a plain integer;
    # everything else stays a string
    if column in (_Column.SIZE, _Column.USED):
        return _parse_size(value)

    if column is _Column.PRIO:
        return int(value)

    return value
||||
|
||||
|
||||
def _process(proc_data: List[Dict]) -> List[Dict]:
|
||||
"""
|
||||
Final processing to conform to the schema.
|
||||
|
||||
Parameters:
|
||||
|
||||
proc_data: (List of Dictionaries) raw structured data to process
|
||||
|
||||
Returns:
|
||||
|
||||
List of Dictionaries. Structured to conform to the schema.
|
||||
"""
|
||||
return proc_data
|
||||
|
||||
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> List[_Entry]:
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        Dictionary. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output: List[dict] = []

    if jc.utils.has_data(data):
        lines = iter(data.splitlines())
        # first line is the header row; it names the columns for every
        # subsequent data row
        headers = next(lines)
        columns = headers.split()
        for each_line in lines:
            line = each_line.split()
            # a row may have up to two fewer fields than the header
            # (NOTE(review): presumably empty trailing LABEL/UUID cells;
            # zip() below pairs values positionally, so this assumes only
            # *trailing* columns are ever empty — confirm)
            diff = len(columns) - len(line)
            if not 0 <= diff <= 2:
                raise ParseError(
                    f"Number of columns ({len(line)}) in line does not match number of headers ({len(columns)})"
                )

            document: _Entry = {}
            # zip truncates to the shorter sequence, so missing trailing
            # cells are simply omitted from the row's dictionary
            for each_column, value in zip(columns, line):
                column = _Column.from_header(each_column)
                document[column.value] = _value(value, column)

            raw_output.append(document)

    return raw_output if raw else _process(raw_output)
|
||||
300
jc/parsers/tune2fs.py
Normal file
300
jc/parsers/tune2fs.py
Normal file
@@ -0,0 +1,300 @@
|
||||
"""jc - JSON Convert `tune2fs -l` command output parser
|
||||
|
||||
Usage (cli):
|
||||
|
||||
$ tune2fs -l /dev/xvda4 | jc --tune2fs
|
||||
|
||||
or
|
||||
|
||||
$ jc tune2fs -l /dev/xvda4
|
||||
|
||||
Usage (module):
|
||||
|
||||
import jc
|
||||
result = jc.parse('tune2fs', tune2fs_command_output)
|
||||
|
||||
Schema:
|
||||
|
||||
{
|
||||
"version": string,
|
||||
"filesystem_volume_name": string,
|
||||
"last_mounted_on": string,
|
||||
"filesystem_uuid": string,
|
||||
"filesystem_magic_number": string,
|
||||
"filesystem_revision_number": string,
|
||||
"filesystem_features": [
|
||||
string
|
||||
],
|
||||
"filesystem_flags": string,
|
||||
"default_mount_options": string,
|
||||
"filesystem_state": string,
|
||||
"errors_behavior": string,
|
||||
"filesystem_os_type": string,
|
||||
"inode_count": integer,
|
||||
"block_count": integer,
|
||||
"reserved_block_count": integer,
|
||||
"overhead_clusters": integer,
|
||||
"free_blocks": integer,
|
||||
"free_inodes": integer,
|
||||
"first_block": integer,
|
||||
"block_size": integer,
|
||||
"fragment_size": integer,
|
||||
"group_descriptor_size": integer,
|
||||
"reserved_gdt_blocks": integer,
|
||||
"blocks_per_group": integer,
|
||||
"fragments_per_group": integer,
|
||||
"inodes_per_group": integer,
|
||||
"inode_blocks_per_group": integer,
|
||||
"flex_block_group_size": integer,
|
||||
"filesystem_created": string,
|
||||
"filesystem_created_epoch": integer,
|
||||
"filesystem_created_epoch_utc": integer,
|
||||
"last_mount_time": string,
|
||||
"last_mount_time_epoch": integer,
|
||||
"last_mount_time_epoch_utc": integer,
|
||||
"last_write_time": string,
|
||||
"last_write_time_epoch": integer,
|
||||
"last_write_time_epoch_utc": integer,
|
||||
"mount_count": integer,
|
||||
"maximum_mount_count": integer,
|
||||
"last_checked": string,
|
||||
"last_checked_epoch": integer,
|
||||
"last_checked_epoch_utc": integer,
|
||||
"check_interval": string,
|
||||
"lifetime_writes": string,
|
||||
"reserved_blocks_uid": string,
|
||||
"reserved_blocks_gid": string,
|
||||
"first_inode": integer,
|
||||
"inode_size": integer,
|
||||
"required_extra_isize": integer,
|
||||
"desired_extra_isize": integer,
|
||||
"journal_inode": integer,
|
||||
"default_directory_hash": string,
|
||||
"directory_hash_seed": string,
|
||||
"journal_backup": string,
|
||||
"checksum_type": string,
|
||||
"checksum": string
|
||||
}
|
||||
|
||||
Examples:
|
||||
|
||||
$ tune2fs | jc --tune2fs -p
|
||||
{
|
||||
"version": "1.46.2 (28-Feb-2021)",
|
||||
"filesystem_volume_name": "<none>",
|
||||
"last_mounted_on": "/home",
|
||||
"filesystem_uuid": "5fb78e1a-b214-44e2-a309-8e35116d8dd6",
|
||||
"filesystem_magic_number": "0xEF53",
|
||||
"filesystem_revision_number": "1 (dynamic)",
|
||||
"filesystem_features": [
|
||||
"has_journal",
|
||||
"ext_attr",
|
||||
"resize_inode",
|
||||
"dir_index",
|
||||
"filetype",
|
||||
"needs_recovery",
|
||||
"extent",
|
||||
"64bit",
|
||||
"flex_bg",
|
||||
"sparse_super",
|
||||
"large_file",
|
||||
"huge_file",
|
||||
"dir_nlink",
|
||||
"extra_isize",
|
||||
"metadata_csum"
|
||||
],
|
||||
"filesystem_flags": "signed_directory_hash",
|
||||
"default_mount_options": "user_xattr acl",
|
||||
"filesystem_state": "clean",
|
||||
"errors_behavior": "Continue",
|
||||
"filesystem_os_type": "Linux",
|
||||
"inode_count": 3932160,
|
||||
"block_count": 15728640,
|
||||
"reserved_block_count": 786432,
|
||||
"free_blocks": 15198453,
|
||||
"free_inodes": 3864620,
|
||||
"first_block": 0,
|
||||
"block_size": 4096,
|
||||
"fragment_size": 4096,
|
||||
"group_descriptor_size": 64,
|
||||
"reserved_gdt_blocks": 1024,
|
||||
"blocks_per_group": 32768,
|
||||
"fragments_per_group": 32768,
|
||||
"inodes_per_group": 8192,
|
||||
"inode_blocks_per_group": 512,
|
||||
"flex_block_group_size": 16,
|
||||
"filesystem_created": "Mon Apr 6 15:10:37 2020",
|
||||
"last_mount_time": "Mon Sep 19 15:16:20 2022",
|
||||
"last_write_time": "Mon Sep 19 15:16:20 2022",
|
||||
"mount_count": 14,
|
||||
"maximum_mount_count": -1,
|
||||
"last_checked": "Fri Apr 8 15:24:22 2022",
|
||||
"check_interval": "0 (<none>)",
|
||||
"lifetime_writes": "203 GB",
|
||||
"reserved_blocks_uid": "0 (user root)",
|
||||
"reserved_blocks_gid": "0 (group root)",
|
||||
"first_inode": 11,
|
||||
"inode_size": 256,
|
||||
"required_extra_isize": 32,
|
||||
"desired_extra_isize": 32,
|
||||
"journal_inode": 8,
|
||||
"default_directory_hash": "half_md4",
|
||||
"directory_hash_seed": "67d5358d-723d-4ce3-b3c0-30ddb433ad9e",
|
||||
"journal_backup": "inode blocks",
|
||||
"checksum_type": "crc32c",
|
||||
"checksum": "0x7809afff",
|
||||
"filesystem_created_epoch": 1586211037,
|
||||
"filesystem_created_epoch_utc": null,
|
||||
"last_mount_time_epoch": 1663625780,
|
||||
"last_mount_time_epoch_utc": null,
|
||||
"last_write_time_epoch": 1663625780,
|
||||
"last_write_time_epoch_utc": null,
|
||||
"last_checked_epoch": 1649456662,
|
||||
"last_checked_epoch_utc": null
|
||||
}
|
||||
|
||||
$ tune2fs | jc --tune2fs -p -r
|
||||
{
|
||||
"version": "1.46.2 (28-Feb-2021)",
|
||||
"filesystem_volume_name": "<none>",
|
||||
"last_mounted_on": "/home",
|
||||
"filesystem_uuid": "5fb78e1a-b214-44e2-a309-8e35116d8dd6",
|
||||
"filesystem_magic_number": "0xEF53",
|
||||
"filesystem_revision_number": "1 (dynamic)",
|
||||
"filesystem_features": "has_journal ext_attr resize_inode dir_index filetype needs_recovery extent 64bit flex_bg sparse_super large_file huge_file dir_nlink extra_isize metadata_csum",
|
||||
"filesystem_flags": "signed_directory_hash",
|
||||
"default_mount_options": "user_xattr acl",
|
||||
"filesystem_state": "clean",
|
||||
"errors_behavior": "Continue",
|
||||
"filesystem_os_type": "Linux",
|
||||
"inode_count": "3932160",
|
||||
"block_count": "15728640",
|
||||
"reserved_block_count": "786432",
|
||||
"free_blocks": "15198453",
|
||||
"free_inodes": "3864620",
|
||||
"first_block": "0",
|
||||
"block_size": "4096",
|
||||
"fragment_size": "4096",
|
||||
"group_descriptor_size": "64",
|
||||
"reserved_gdt_blocks": "1024",
|
||||
"blocks_per_group": "32768",
|
||||
"fragments_per_group": "32768",
|
||||
"inodes_per_group": "8192",
|
||||
"inode_blocks_per_group": "512",
|
||||
"flex_block_group_size": "16",
|
||||
"filesystem_created": "Mon Apr 6 15:10:37 2020",
|
||||
"last_mount_time": "Mon Sep 19 15:16:20 2022",
|
||||
"last_write_time": "Mon Sep 19 15:16:20 2022",
|
||||
"mount_count": "14",
|
||||
"maximum_mount_count": "-1",
|
||||
"last_checked": "Fri Apr 8 15:24:22 2022",
|
||||
"check_interval": "0 (<none>)",
|
||||
"lifetime_writes": "203 GB",
|
||||
"reserved_blocks_uid": "0 (user root)",
|
||||
"reserved_blocks_gid": "0 (group root)",
|
||||
"first_inode": "11",
|
||||
"inode_size": "256",
|
||||
"required_extra_isize": "32",
|
||||
"desired_extra_isize": "32",
|
||||
"journal_inode": "8",
|
||||
"default_directory_hash": "half_md4",
|
||||
"directory_hash_seed": "67d5358d-723d-4ce3-b3c0-30ddb433ad9e",
|
||||
"journal_backup": "inode blocks",
|
||||
"checksum_type": "crc32c",
|
||||
"checksum": "0x7809afff"
|
||||
}
|
||||
"""
|
||||
from typing import Dict
|
||||
from jc.jc_types import JSONDictType
|
||||
import jc.utils
|
||||
|
||||
|
||||
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = '`tune2fs -l` command parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    compatible = ['linux']
    tags = ['command']
    # allows `jc tune2fs -l ...` magic syntax
    magic_commands = ['tune2fs -l']


__version__ = info.version
|
||||
|
||||
|
||||
def _process(proc_data: JSONDictType) -> JSONDictType:
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data: (Dictionary) raw structured data to process

    Returns:

        Dictionary. Structured to conform to the schema.
    """
    # fields converted from decimal strings to integers
    int_list = {'inode_count', 'block_count', 'reserved_block_count', 'free_blocks',
                'free_inodes', 'first_block', 'block_size', 'fragment_size',
                'group_descriptor_size', 'reserved_gdt_blocks', 'blocks_per_group',
                'fragments_per_group', 'inodes_per_group', 'inode_blocks_per_group',
                'flex_block_group_size', 'mount_count', 'maximum_mount_count',
                'first_inode', 'inode_size', 'required_extra_isize', 'desired_extra_isize',
                'journal_inode', 'overhead_clusters'}

    # fields expanded into <key>_epoch / <key>_epoch_utc timestamps
    datetime_list = {'filesystem_created', 'last_mount_time', 'last_write_time', 'last_checked'}

    # iterate a snapshot of the keys since the datetime branch inserts
    # new *_epoch keys into proc_data while we walk it
    for key in list(proc_data):
        if key in int_list:
            proc_data[key] = jc.utils.convert_to_int(proc_data[key])
        elif key in datetime_list:
            # format id 1000: presumably the ctime-style date string
            # tune2fs prints — confirm against jc.utils.timestamp formats
            dt = jc.utils.timestamp(proc_data[key], (1000,))
            proc_data[key + '_epoch'] = dt.naive
            proc_data[key + '_epoch_utc'] = dt.utc

    if 'filesystem_features' in proc_data:
        # feature string becomes a list of feature flags
        proc_data['filesystem_features'] = proc_data['filesystem_features'].split()

    return proc_data
|
||||
|
||||
|
||||
def parse(
    data: str,
    raw: bool = False,
    quiet: bool = False
) -> JSONDictType:
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        Dictionary. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output: Dict = {}

    if jc.utils.has_data(data):

        for line in filter(None, data.splitlines()):

            # version banner, e.g. "tune2fs 1.46.2 (28-Feb-2021)"
            if line.startswith('tune2fs '):
                raw_output['version'] = line.split(maxsplit=1)[1]
                continue

            linesplit = line.split(':', maxsplit=1)

            # Fix: skip lines without a "key: value" delimiter instead of
            # raising IndexError on linesplit[1]
            if len(linesplit) != 2:
                continue

            # normalize keys: "Inode count" -> "inode_count", "#" -> "number"
            key = linesplit[0].lower().replace(' ', '_').replace('#', 'number')
            val = linesplit[1].strip()
            raw_output[key] = val

    return raw_output if raw else _process(raw_output)
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
Best-effort attempt to parse various styles of version numbers. This parser
|
||||
is based off of the version parser included in the CPython distutils
|
||||
libary.
|
||||
library.
|
||||
|
||||
If the version string conforms to some de facto-standard versioning rules
|
||||
followed by many developers a `strict` key will be present in the output
|
||||
|
||||
@@ -121,12 +121,16 @@ Examples:
|
||||
}
|
||||
]
|
||||
"""
|
||||
import re
|
||||
import jc.utils
|
||||
|
||||
PROCS_HEADER_RE = re.compile(r'^-*procs-* ')
|
||||
DISK_HEADER_RE = re.compile(r'^-*disk-* ')
|
||||
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.3'
|
||||
version = '1.4'
|
||||
description = '`vmstat` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -203,18 +207,18 @@ def parse(data, raw=False, quiet=False):
|
||||
for line in filter(None, data.splitlines()):
|
||||
|
||||
# detect output type
|
||||
if not procs and not disk and line.startswith('procs'):
|
||||
if not procs and not disk and PROCS_HEADER_RE.match(line):
|
||||
procs = True
|
||||
tstamp = '-timestamp-' in line
|
||||
continue
|
||||
|
||||
if not procs and not disk and line.startswith('disk'):
|
||||
if not procs and not disk and DISK_HEADER_RE.match(line):
|
||||
disk = True
|
||||
tstamp = '-timestamp-' in line
|
||||
continue
|
||||
|
||||
# skip header rows
|
||||
if (procs or disk) and (line.startswith('procs') or line.startswith('disk')):
|
||||
if (procs or disk) and (PROCS_HEADER_RE.match(line) or DISK_HEADER_RE.match(line)):
|
||||
continue
|
||||
|
||||
if 'swpd' in line and 'free' in line and 'buff' in line and 'cache' in line:
|
||||
|
||||
@@ -91,16 +91,20 @@ Examples:
|
||||
{"runnable_procs":"2","uninterruptible_sleeping_procs":"0","virtua...}
|
||||
...
|
||||
"""
|
||||
import re
|
||||
import jc.utils
|
||||
from jc.streaming import (
|
||||
add_jc_meta, streaming_input_type_check, streaming_line_input_type_check, raise_or_yield
|
||||
)
|
||||
from jc.exceptions import ParseError
|
||||
|
||||
PROCS_HEADER_RE = re.compile(r'^-*procs-* ')
|
||||
DISK_HEADER_RE = re.compile(r'^-*disk-* ')
|
||||
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.2'
|
||||
version = '1.3'
|
||||
description = '`vmstat` command streaming parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -182,18 +186,18 @@ def parse(data, raw=False, quiet=False, ignore_exceptions=False):
|
||||
continue
|
||||
|
||||
# detect output type
|
||||
if not procs and not disk and line.startswith('procs'):
|
||||
if not procs and not disk and PROCS_HEADER_RE.match(line):
|
||||
procs = True
|
||||
tstamp = '-timestamp-' in line
|
||||
continue
|
||||
|
||||
if not procs and not disk and line.startswith('disk'):
|
||||
if not procs and not disk and DISK_HEADER_RE.match(line):
|
||||
disk = True
|
||||
tstamp = '-timestamp-' in line
|
||||
continue
|
||||
|
||||
# skip header rows
|
||||
if (procs or disk) and (line.startswith('procs') or line.startswith('disk')):
|
||||
if (procs or disk) and (PROCS_HEADER_RE.match(line) or DISK_HEADER_RE.match(line)):
|
||||
continue
|
||||
|
||||
if 'swpd' in line and 'free' in line and 'buff' in line and 'cache' in line:
|
||||
|
||||
@@ -136,7 +136,7 @@ import jc.utils
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.7'
|
||||
version = '1.8'
|
||||
description = '`who` command parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -272,6 +272,13 @@ def parse(data, raw=False, quiet=False):
|
||||
output_line['time'] = ' '.join([linedata.pop(0),
|
||||
linedata.pop(0)])
|
||||
|
||||
# if the rest of the data is within parens, then it's the remote IP or console
|
||||
if len(linedata) > 1 and ' '.join(linedata).startswith('(') and ' '.join(linedata).endswith(')'):
|
||||
output_line['from'] = ' '.join(linedata)[1:-1]
|
||||
raw_output.append(output_line)
|
||||
linedata = []
|
||||
continue
|
||||
|
||||
# if just one more field, then it's the remote IP
|
||||
if len(linedata) == 1:
|
||||
output_line['from'] = linedata[0].replace('(', '').replace(')', '')
|
||||
@@ -296,7 +303,4 @@ def parse(data, raw=False, quiet=False):
|
||||
|
||||
raw_output.append(output_line)
|
||||
|
||||
if raw:
|
||||
return raw_output
|
||||
else:
|
||||
return _process(raw_output)
|
||||
return raw_output if raw else _process(raw_output)
|
||||
|
||||
@@ -413,7 +413,7 @@ from jc.parsers.asn1crypto import pem, x509, jc_global
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.2'
|
||||
version = '1.3'
|
||||
description = 'X.509 PEM and DER certificate file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -477,7 +477,10 @@ def _fix_objects(obj):
|
||||
# according to the spec this field can be string or integer
|
||||
if isinstance(v, int):
|
||||
v_str = str(v)
|
||||
v_hex = _b2a(_i2b(v))
|
||||
if v < 0:
|
||||
v_hex = "(Negative)" + _b2a(_i2b(abs(v)))
|
||||
else:
|
||||
v_hex = _b2a(_i2b(v))
|
||||
else:
|
||||
v_str = str(v)
|
||||
v_hex = _b2a(v_str.encode())
|
||||
|
||||
@@ -81,7 +81,7 @@ except Exception:
|
||||
|
||||
class info():
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = '1.7'
|
||||
version = '1.9'
|
||||
description = 'XML file parser'
|
||||
author = 'Kelly Brazil'
|
||||
author_email = 'kellyjonbrazil@gmail.com'
|
||||
@@ -105,13 +105,18 @@ def _process(proc_data, has_data=False):
|
||||
|
||||
Dictionary representing an XML document.
|
||||
"""
|
||||
raw_output = []
|
||||
proc_output = []
|
||||
|
||||
if has_data:
|
||||
# standard output with @ prefix for attributes
|
||||
raw_output = xmltodict.parse(proc_data, dict_constructor=dict)
|
||||
try:
|
||||
proc_output = xmltodict.parse(proc_data,
|
||||
dict_constructor=dict,
|
||||
process_comments=True)
|
||||
except (ValueError, TypeError):
|
||||
proc_output = xmltodict.parse(proc_data, dict_constructor=dict)
|
||||
|
||||
return raw_output
|
||||
return proc_output
|
||||
|
||||
|
||||
def parse(data, raw=False, quiet=False):
|
||||
@@ -137,10 +142,17 @@ def parse(data, raw=False, quiet=False):
|
||||
if jc.utils.has_data(data):
|
||||
has_data = True
|
||||
|
||||
if raw:
|
||||
if has_data:
|
||||
# modified output with _ prefix for attributes
|
||||
raw_output = xmltodict.parse(data, dict_constructor=dict, attr_prefix='_')
|
||||
if raw and has_data:
|
||||
# modified output with _ prefix for attributes
|
||||
try:
|
||||
raw_output = xmltodict.parse(data,
|
||||
dict_constructor=dict,
|
||||
process_comments=True,
|
||||
attr_prefix='_')
|
||||
except (ValueError, TypeError):
|
||||
raw_output = xmltodict.parse(data,
|
||||
dict_constructor=dict,
|
||||
attr_prefix='_')
|
||||
|
||||
return raw_output
|
||||
|
||||
|
||||
@@ -26,22 +26,24 @@ Schema:
|
||||
"current_height": integer,
|
||||
"maximum_width": integer,
|
||||
"maximum_height": integer,
|
||||
"associated_device": {
|
||||
"associated_modes": [
|
||||
{
|
||||
"resolution_width": integer,
|
||||
"resolution_height": integer,
|
||||
"is_high_resolution": boolean,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": float,
|
||||
"is_current": boolean,
|
||||
"is_preferred": boolean
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
},
|
||||
"devices": [
|
||||
{
|
||||
"modes": [
|
||||
{
|
||||
"resolution_width": integer,
|
||||
"resolution_height": integer,
|
||||
"is_high_resolution": boolean,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": float,
|
||||
"is_current": boolean,
|
||||
"is_preferred": boolean
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"is_connected": boolean,
|
||||
"is_primary": boolean,
|
||||
"device_name": string,
|
||||
@@ -57,24 +59,6 @@ Schema:
|
||||
"rotation": string,
|
||||
"reflection": string
|
||||
}
|
||||
],
|
||||
"unassociated_devices": [
|
||||
{
|
||||
"associated_modes": [
|
||||
{
|
||||
"resolution_width": integer,
|
||||
"resolution_height": integer,
|
||||
"is_high_resolution": boolean,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": float,
|
||||
"is_current": boolean,
|
||||
"is_preferred": boolean
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
@@ -91,53 +75,54 @@ Examples:
|
||||
"current_height": 1080,
|
||||
"maximum_width": 32767,
|
||||
"maximum_height": 32767,
|
||||
"associated_device": {
|
||||
"associated_modes": [
|
||||
{
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 60.03,
|
||||
"is_current": true,
|
||||
"is_preferred": true
|
||||
},
|
||||
{
|
||||
"frequency": 59.93,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"resolution_width": 1680,
|
||||
"resolution_height": 1050,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 59.88,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"is_connected": true,
|
||||
"is_primary": true,
|
||||
"device_name": "eDP1",
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"offset_width": 0,
|
||||
"offset_height": 0,
|
||||
"dimension_width": 310,
|
||||
"dimension_height": 170,
|
||||
"rotation": "normal",
|
||||
"reflection": "normal"
|
||||
}
|
||||
"devices": [
|
||||
{
|
||||
"modes": [
|
||||
{
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 60.03,
|
||||
"is_current": true,
|
||||
"is_preferred": true
|
||||
},
|
||||
{
|
||||
"frequency": 59.93,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"resolution_width": 1680,
|
||||
"resolution_height": 1050,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 59.88,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"is_connected": true,
|
||||
"is_primary": true,
|
||||
"device_name": "eDP1",
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"offset_width": 0,
|
||||
"offset_height": 0,
|
||||
"dimension_width": 310,
|
||||
"dimension_height": 170,
|
||||
"rotation": "normal",
|
||||
"reflection": "normal"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"unassociated_devices": []
|
||||
]
|
||||
}
|
||||
|
||||
$ xrandr --properties | jc --xrandr -p
|
||||
@@ -151,56 +136,57 @@ Examples:
|
||||
"current_height": 1080,
|
||||
"maximum_width": 32767,
|
||||
"maximum_height": 32767,
|
||||
"associated_device": {
|
||||
"associated_modes": [
|
||||
{
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 60.03,
|
||||
"is_current": true,
|
||||
"is_preferred": true
|
||||
},
|
||||
{
|
||||
"frequency": 59.93,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"resolution_width": 1680,
|
||||
"resolution_height": 1050,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 59.88,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"is_connected": true,
|
||||
"is_primary": true,
|
||||
"device_name": "eDP1",
|
||||
"model_name": "ASUS VW193S",
|
||||
"product_id": "54297",
|
||||
"serial_number": "78L8021107",
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"offset_width": 0,
|
||||
"offset_height": 0,
|
||||
"dimension_width": 310,
|
||||
"dimension_height": 170,
|
||||
"rotation": "normal",
|
||||
"reflection": "normal"
|
||||
}
|
||||
"devices": [
|
||||
{
|
||||
"modes": [
|
||||
{
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 60.03,
|
||||
"is_current": true,
|
||||
"is_preferred": true
|
||||
},
|
||||
{
|
||||
"frequency": 59.93,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
},
|
||||
{
|
||||
"resolution_width": 1680,
|
||||
"resolution_height": 1050,
|
||||
"is_high_resolution": false,
|
||||
"frequencies": [
|
||||
{
|
||||
"frequency": 59.88,
|
||||
"is_current": false,
|
||||
"is_preferred": false
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"is_connected": true,
|
||||
"is_primary": true,
|
||||
"device_name": "eDP1",
|
||||
"model_name": "ASUS VW193S",
|
||||
"product_id": "54297",
|
||||
"serial_number": "78L8021107",
|
||||
"resolution_width": 1920,
|
||||
"resolution_height": 1080,
|
||||
"offset_width": 0,
|
||||
"offset_height": 0,
|
||||
"dimension_width": 310,
|
||||
"dimension_height": 170,
|
||||
"rotation": "normal",
|
||||
"reflection": "normal"
|
||||
}
|
||||
]
|
||||
}
|
||||
],
|
||||
"unassociated_devices": []
|
||||
]
|
||||
}
|
||||
"""
|
||||
import re
|
||||
@@ -212,18 +198,47 @@ from jc.parsers.pyedid.helpers.edid_helper import EdidHelper
|
||||
|
||||
class info:
|
||||
"""Provides parser metadata (version, author, etc.)"""
|
||||
version = "1.2"
|
||||
version = "1.4"
|
||||
description = "`xrandr` command parser"
|
||||
author = "Kevin Lyter"
|
||||
author_email = "lyter_git at sent.com"
|
||||
details = 'Using parts of the pyedid library at https://github.com/jojonas/pyedid.'
|
||||
author_email = "code (at) lyterk.com"
|
||||
details = "Using parts of the pyedid library at https://github.com/jojonas/pyedid."
|
||||
compatible = ["linux", "darwin", "cygwin", "aix", "freebsd"]
|
||||
magic_commands = ["xrandr"]
|
||||
tags = ['command']
|
||||
tags = ["command"]
|
||||
|
||||
|
||||
__version__ = info.version
|
||||
|
||||
# keep parsing state so we know which parsers have already tried the line
|
||||
# Structure is:
|
||||
# {
|
||||
# <line_string>: [
|
||||
# <parser_string>
|
||||
# ]
|
||||
# }
|
||||
#
|
||||
# Where <line_string> is the xrandr output line to be checked and <parser_string>
|
||||
# can contain "screen", "device", or "model"
|
||||
parse_state: Dict[str, List] = {}
|
||||
|
||||
|
||||
def _was_parsed(line: str, parser: str) -> bool:
|
||||
"""
|
||||
Check if entered parser has already parsed. If so return True.
|
||||
If not, return false and add the parser to the list for the line entry.
|
||||
"""
|
||||
if line in parse_state:
|
||||
if parser in parse_state[line]:
|
||||
return True
|
||||
|
||||
parse_state[line].append(parser)
|
||||
return False
|
||||
|
||||
parse_state[line] = [parser]
|
||||
return False
|
||||
|
||||
|
||||
try:
|
||||
from typing import TypedDict
|
||||
|
||||
@@ -267,7 +282,7 @@ try:
|
||||
"offset_height": int,
|
||||
"dimension_width": int,
|
||||
"dimension_height": int,
|
||||
"associated_modes": List[Mode],
|
||||
"modes": List[Mode],
|
||||
"rotation": str,
|
||||
"reflection": str,
|
||||
},
|
||||
@@ -282,14 +297,13 @@ try:
|
||||
"current_height": int,
|
||||
"maximum_width": int,
|
||||
"maximum_height": int,
|
||||
"associated_device": Device,
|
||||
"devices": List[Device],
|
||||
},
|
||||
)
|
||||
Response = TypedDict(
|
||||
"Response",
|
||||
{
|
||||
"screens": List[Screen],
|
||||
"unassociated_devices": List[Device],
|
||||
},
|
||||
)
|
||||
except ImportError:
|
||||
@@ -311,20 +325,27 @@ _screen_pattern = (
|
||||
|
||||
def _parse_screen(next_lines: List[str]) -> Optional[Screen]:
|
||||
next_line = next_lines.pop()
|
||||
|
||||
if _was_parsed(next_line, 'screen'):
|
||||
return None
|
||||
|
||||
result = re.match(_screen_pattern, next_line)
|
||||
if not result:
|
||||
next_lines.append(next_line)
|
||||
return None
|
||||
|
||||
raw_matches = result.groupdict()
|
||||
screen: Screen = {}
|
||||
|
||||
screen: Screen = {"devices": []}
|
||||
for k, v in raw_matches.items():
|
||||
screen[k] = int(v)
|
||||
|
||||
if next_lines:
|
||||
while next_lines:
|
||||
device: Optional[Device] = _parse_device(next_lines)
|
||||
if device:
|
||||
screen["associated_device"] = device
|
||||
if not device:
|
||||
break
|
||||
else:
|
||||
screen["devices"].append(device)
|
||||
|
||||
return screen
|
||||
|
||||
@@ -340,8 +361,8 @@ _device_pattern = (
|
||||
+ r"\+(?P<offset_width>\d+)\+(?P<offset_height>\d+))? "
|
||||
+ r"(?P<rotation>(normal|right|left|inverted)?) ?"
|
||||
+ r"(?P<reflection>(X axis|Y axis|X and Y axis)?) ?"
|
||||
+ r"\(normal left inverted right x axis y axis\)"
|
||||
+ r"( ((?P<dimension_width>\d+)mm x (?P<dimension_height>\d+)mm)?)?"
|
||||
+ r"(\(normal left inverted right x axis y axis\))?"
|
||||
+ r"( ?((?P<dimension_width>\d+)mm x (?P<dimension_height>\d+)mm)?)?"
|
||||
)
|
||||
|
||||
|
||||
@@ -350,6 +371,10 @@ def _parse_device(next_lines: List[str], quiet: bool = False) -> Optional[Device
|
||||
return None
|
||||
|
||||
next_line = next_lines.pop()
|
||||
|
||||
if _was_parsed(next_line, 'device'):
|
||||
return None
|
||||
|
||||
result = re.match(_device_pattern, next_line)
|
||||
if not result:
|
||||
next_lines.append(next_line)
|
||||
@@ -358,7 +383,7 @@ def _parse_device(next_lines: List[str], quiet: bool = False) -> Optional[Device
|
||||
matches = result.groupdict()
|
||||
|
||||
device: Device = {
|
||||
"associated_modes": [],
|
||||
"modes": [],
|
||||
"is_connected": matches["is_connected"] == "connected",
|
||||
"is_primary": matches["is_primary"] is not None
|
||||
and len(matches["is_primary"]) > 0,
|
||||
@@ -367,14 +392,21 @@ def _parse_device(next_lines: List[str], quiet: bool = False) -> Optional[Device
|
||||
"reflection": matches["reflection"] or "normal",
|
||||
}
|
||||
for k, v in matches.items():
|
||||
if k not in {"is_connected", "is_primary", "device_name", "rotation", "reflection"}:
|
||||
if k not in {
|
||||
"is_connected",
|
||||
"is_primary",
|
||||
"device_name",
|
||||
"rotation",
|
||||
"reflection",
|
||||
}:
|
||||
try:
|
||||
if v:
|
||||
device[k] = int(v)
|
||||
except ValueError and not quiet:
|
||||
jc.utils.warning_message(
|
||||
[f"{next_line} : {k} - {v} is not int-able"]
|
||||
)
|
||||
except ValueError:
|
||||
if not quiet:
|
||||
jc.utils.warning_message(
|
||||
[f"{next_line} : {k} - {v} is not int-able"]
|
||||
)
|
||||
|
||||
model: Optional[Model] = _parse_model(next_lines, quiet)
|
||||
if model:
|
||||
@@ -386,7 +418,7 @@ def _parse_device(next_lines: List[str], quiet: bool = False) -> Optional[Device
|
||||
next_line = next_lines.pop()
|
||||
next_mode: Optional[Mode] = _parse_mode(next_line)
|
||||
if next_mode:
|
||||
device["associated_modes"].append(next_mode)
|
||||
device["modes"].append(next_mode)
|
||||
else:
|
||||
if re.match(_device_pattern, next_line):
|
||||
next_lines.append(next_line)
|
||||
@@ -412,6 +444,10 @@ def _parse_model(next_lines: List[str], quiet: bool = False) -> Optional[Model]:
|
||||
return None
|
||||
|
||||
next_line = next_lines.pop()
|
||||
|
||||
if _was_parsed(next_line, 'model'):
|
||||
return None
|
||||
|
||||
if not re.match(_edid_head_pattern, next_line):
|
||||
next_lines.append(next_line)
|
||||
return None
|
||||
@@ -448,6 +484,7 @@ _frequencies_pattern = r"(((?P<frequency>\d+\.\d+)(?P<star>\*| |)(?P<plus>\+?)?)
|
||||
def _parse_mode(line: str) -> Optional[Mode]:
|
||||
result = re.match(_mode_pattern, line)
|
||||
frequencies: List[Frequency] = []
|
||||
|
||||
if not result:
|
||||
return None
|
||||
|
||||
@@ -481,7 +518,7 @@ def _parse_mode(line: str) -> Optional[Mode]:
|
||||
return mode
|
||||
|
||||
|
||||
def parse(data: str, raw: bool =False, quiet: bool =False) -> Dict:
|
||||
def parse(data: str, raw: bool = False, quiet: bool = False) -> Dict:
|
||||
"""
|
||||
Main text parsing function
|
||||
|
||||
@@ -500,19 +537,13 @@ def parse(data: str, raw: bool =False, quiet: bool =False) -> Dict:
|
||||
|
||||
linedata = data.splitlines()
|
||||
linedata.reverse() # For popping
|
||||
result: Response = {"screens": [], "unassociated_devices": []}
|
||||
result: Dict = {}
|
||||
|
||||
if jc.utils.has_data(data):
|
||||
result = {"screens": []}
|
||||
while linedata:
|
||||
screen = _parse_screen(linedata)
|
||||
if screen:
|
||||
result["screens"].append(screen)
|
||||
else:
|
||||
device = _parse_device(linedata, quiet)
|
||||
if device:
|
||||
result["unassociated_devices"].append(device)
|
||||
|
||||
if not result["unassociated_devices"] and not result["screens"]:
|
||||
return {}
|
||||
|
||||
return result
|
||||
|
||||
113
jc/utils.py
113
jc/utils.py
@@ -3,6 +3,8 @@ import sys
|
||||
import re
|
||||
import locale
|
||||
import shutil
|
||||
from collections import namedtuple
|
||||
from numbers import Number
|
||||
from datetime import datetime, timezone
|
||||
from textwrap import TextWrapper
|
||||
from functools import lru_cache
|
||||
@@ -274,6 +276,117 @@ def convert_to_bool(value: object) -> bool:
|
||||
return False
|
||||
|
||||
|
||||
# convert_size_to_int from https://github.com/xolox/python-humanfriendly
|
||||
|
||||
# Copyright (c) 2021 Peter Odding
|
||||
|
||||
# Permission is hereby granted, free of charge, to any person obtaining
|
||||
# a copy of this software and associated documentation files (the
|
||||
# "Software"), to deal in the Software without restriction, including
|
||||
# without limitation the rights to use, copy, modify, merge, publish,
|
||||
# distribute, sublicense, and/or sell copies of the Software, and to
|
||||
# permit persons to whom the Software is furnished to do so, subject to
|
||||
# the following conditions:
|
||||
|
||||
# The above copyright notice and this permission notice shall be
|
||||
# included in all copies or substantial portions of the Software.
|
||||
|
||||
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
|
||||
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
|
||||
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
|
||||
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
|
||||
# LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
|
||||
# OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
|
||||
# WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
|
||||
def convert_size_to_int(size: str, binary: bool = False) -> Optional[int]:
|
||||
"""
|
||||
Parse a human readable data size and return the number of bytes.
|
||||
|
||||
Parameters:
|
||||
|
||||
size: (string) The human readable file size to parse.
|
||||
binary: (boolean) `True` to use binary multiples of bytes
|
||||
(base-2) for ambiguous unit symbols and names,
|
||||
`False` to use decimal multiples of bytes (base-10).
|
||||
Returns:
|
||||
|
||||
integer/None Integer if successful conversion, otherwise None
|
||||
|
||||
This function knows how to parse sizes in bytes, kilobytes, megabytes,
|
||||
gigabytes, terabytes and petabytes. Some examples:
|
||||
|
||||
>>> convert_size_to_int('42')
|
||||
42
|
||||
>>> convert_size_to_int('13b')
|
||||
13
|
||||
>>> convert_size_to_int('5 bytes')
|
||||
5
|
||||
>>> convert_size_to_int('1 KB')
|
||||
1000
|
||||
>>> convert_size_to_int('1 kilobyte')
|
||||
1000
|
||||
>>> convert_size_to_int('1 KiB')
|
||||
1024
|
||||
>>> convert_size_to_int('1 KB', binary=True)
|
||||
1024
|
||||
>>> convert_size_to_int('1.5 GB')
|
||||
1500000000
|
||||
>>> convert_size_to_int('1.5 GB', binary=True)
|
||||
1610612736
|
||||
"""
|
||||
def tokenize(text: str) -> List[str]:
|
||||
tokenized_input: List = []
|
||||
for token in re.split(r'(\d+(?:\.\d+)?)', text):
|
||||
token = token.strip()
|
||||
if re.match(r'\d+\.\d+', token):
|
||||
tokenized_input.append(float(token))
|
||||
elif token.isdigit():
|
||||
tokenized_input.append(int(token))
|
||||
elif token:
|
||||
tokenized_input.append(token)
|
||||
return tokenized_input
|
||||
|
||||
SizeUnit = namedtuple('SizeUnit', 'divider, symbol, name')
|
||||
CombinedUnit = namedtuple('CombinedUnit', 'decimal, binary')
|
||||
disk_size_units = (
|
||||
CombinedUnit(SizeUnit(1000**1, 'KB', 'kilobyte'), SizeUnit(1024**1, 'KiB', 'kibibyte')),
|
||||
CombinedUnit(SizeUnit(1000**2, 'MB', 'megabyte'), SizeUnit(1024**2, 'MiB', 'mebibyte')),
|
||||
CombinedUnit(SizeUnit(1000**3, 'GB', 'gigabyte'), SizeUnit(1024**3, 'GiB', 'gibibyte')),
|
||||
CombinedUnit(SizeUnit(1000**4, 'TB', 'terabyte'), SizeUnit(1024**4, 'TiB', 'tebibyte')),
|
||||
CombinedUnit(SizeUnit(1000**5, 'PB', 'petabyte'), SizeUnit(1024**5, 'PiB', 'pebibyte')),
|
||||
CombinedUnit(SizeUnit(1000**6, 'EB', 'exabyte'), SizeUnit(1024**6, 'EiB', 'exbibyte')),
|
||||
CombinedUnit(SizeUnit(1000**7, 'ZB', 'zettabyte'), SizeUnit(1024**7, 'ZiB', 'zebibyte')),
|
||||
CombinedUnit(SizeUnit(1000**8, 'YB', 'yottabyte'), SizeUnit(1024**8, 'YiB', 'yobibyte')),
|
||||
)
|
||||
tokens = tokenize(size)
|
||||
if tokens and isinstance(tokens[0], Number):
|
||||
# Get the normalized unit (if any) from the tokenized input.
|
||||
normalized_unit = tokens[1].lower() if len(tokens) == 2 and isinstance(tokens[1], str) else ''
|
||||
# If the input contains only a number, it's assumed to be the number of
|
||||
# bytes. The second token can also explicitly reference the unit bytes.
|
||||
if len(tokens) == 1 or normalized_unit.startswith('b'):
|
||||
return int(tokens[0])
|
||||
# Otherwise we expect two tokens: A number and a unit.
|
||||
if normalized_unit:
|
||||
# Convert plural units to singular units, for details:
|
||||
# https://github.com/xolox/python-humanfriendly/issues/26
|
||||
normalized_unit = normalized_unit.rstrip('s')
|
||||
for unit in disk_size_units:
|
||||
# First we check for unambiguous symbols (KiB, MiB, GiB, etc)
|
||||
# and names (kibibyte, mebibyte, gibibyte, etc) because their
|
||||
# handling is always the same.
|
||||
if normalized_unit in (unit.binary.symbol.lower(), unit.binary.name.lower()):
|
||||
return int(tokens[0] * unit.binary.divider)
|
||||
# Now we will deal with ambiguous prefixes (K, M, G, etc),
|
||||
# symbols (KB, MB, GB, etc) and names (kilobyte, megabyte,
|
||||
# gigabyte, etc) according to the caller's preference.
|
||||
if (normalized_unit in (unit.decimal.symbol.lower(), unit.decimal.name.lower()) or
|
||||
normalized_unit.startswith(unit.decimal.symbol[0].lower())):
|
||||
return int(tokens[0] * (unit.binary.divider if binary else unit.decimal.divider))
|
||||
# We failed to parse the size specification.
|
||||
return None
|
||||
|
||||
|
||||
def input_type_check(data: object) -> None:
|
||||
"""Ensure input data is a string. Raises `TypeError` if not."""
|
||||
if not isinstance(data, str):
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user