1
0
mirror of https://github.com/kellyjonbrazil/jc.git synced 2026-04-03 17:44:07 +02:00

Compare commits

...

44 Commits
master ... dev

Author SHA1 Message Date
Kelly Brazil
3f2cd81ca7 doc update 2026-03-30 11:51:22 -07:00
Kelly Brazil
6c8aafa2c5 Merge pull request #688 from queelius/fix/dir-lstrip-drive-letter
Fix dir parser: lstrip strips D: drive letter from path
2026-03-30 11:49:51 -07:00
Kelly Brazil
41e2e16436 Merge branch 'dev' into fix/dir-lstrip-drive-letter 2026-03-30 11:46:32 -07:00
Kelly Brazil
20b625860e Merge pull request #686 from queelius/fix/ifconfig-lstrip-hex-mask
Fix ifconfig hex mask conversion: lstrip('0x') -> [2:]
2026-03-30 11:44:57 -07:00
Kelly Brazil
0d703cd3a8 Merge branch 'dev' into fix/ifconfig-lstrip-hex-mask 2026-03-30 11:41:36 -07:00
Kelly Brazil
7b3d345d62 doc update 2026-03-30 11:36:22 -07:00
Kelly Brazil
53b6f1b329 Merge pull request #689 from ReinerBRO/fix/pip-show-files-section
Handle pip show Files sections
2026-03-30 11:29:59 -07:00
Kelly Brazil
ecc267b3cb Merge branch 'dev' into fix/pip-show-files-section 2026-03-30 11:20:09 -07:00
Kelly Brazil
5d2496b5f7 doc update 2026-03-30 10:52:43 -07:00
Kelly Brazil
810eeba724 version bump 2026-03-30 10:50:56 -07:00
Kelly Brazil
fb5c1b9c94 Merge pull request #692 from juliosuas/fix/ifconfig-lstrip-hex-mask
fix: use [2:] instead of lstrip('0x') to strip hex prefix in ifconfig parser
2026-03-30 10:46:54 -07:00
Kelly Brazil
f797127ca9 Merge branch 'dev' into fix/ifconfig-lstrip-hex-mask 2026-03-30 10:39:54 -07:00
Julio César Suástegui
56bd860a5e fix: use [2:] instead of lstrip('0x') to strip hex prefix in ifconfig parser
str.lstrip('0x') strips any combination of '0' and 'x' characters from
the left, not the literal two-character prefix '0x'. For subnet masks
where the hex digits start with '0' (e.g. '0x00000000' for a /0 mask),
lstrip strips all leading zeros along with the 'x', producing an empty
string instead of '00000000'.

Replace with a slice [2:] which correctly removes exactly the first two
characters ('0x') regardless of what follows.

This bug affected both the legacy ipv4_mask field and the ipv4[] list
items in _process() (lines 267 and 292).

Fixes #685
2026-03-30 03:07:11 -06:00
ReinerBRO
9fe659d625 Handle pip show files sections 2026-03-26 14:14:11 +08:00
Alex Towell
128e36404d Fix dir parser stripping D: drive letter from parent directory path
lstrip(" Directory of ") strips any character in the set
{' ','D','i','r','e','c','t','o','y','f'}, which incorrectly removes
the 'D' from D:\ paths. Use fixed-length prefix removal with [len():]
instead.

Fixes #687

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 03:14:43 -05:00
Alex Towell
2f9377cb67 Fix ifconfig hex mask conversion using lstrip('0x') instead of [2:]
lstrip('0x') strips any character in {'0','x'} from the left, not the
literal prefix "0x". This causes incorrect mask conversion for masks
with leading zero hex digits (e.g. 0x00000000 -> empty string instead
of 0.0.0.0).

Replace lstrip('0x') with [2:] to correctly remove only the '0x'
prefix. Fixes both the legacy ipv4_mask field and the ipv4[] list.

Fixes #685

Co-Authored-By: Claude Opus 4.6 (1M context) <noreply@anthropic.com>
2026-03-25 03:12:34 -05:00
Kelly Brazil
585ff83a2e update changelog 2026-03-16 12:29:46 -07:00
Kelly Brazil
45b23e8b3c version bump 2026-03-16 12:18:49 -07:00
Kelly Brazil
b9eec1a5cd Merge pull request #673 from native-api/haslib_mode
Correctly parse mode indicators in hashsum output
2026-03-16 12:15:18 -07:00
Kelly Brazil
47545833ed Merge branch 'dev' into haslib_mode 2026-03-16 12:05:52 -07:00
Kelly Brazil
6f8e4fb2ed Merge pull request #679 from jylenhof/master
docs(README): add mise alternate installation documentation
2026-03-16 12:01:45 -07:00
Kelly Brazil
4f7821ac8e Merge branch 'dev' into master 2026-03-16 12:00:13 -07:00
Kelly Brazil
7f2722ff96 remove int_value field and add tests 2026-03-13 15:16:54 -07:00
Kelly Brazil
1d19de3005 add typeset and declare command parser 2026-03-10 18:16:53 -07:00
Kelly Brazil
e01287b329 add stats fields to json output. #676 2026-03-09 17:35:33 -07:00
Kelly Brazil
441bcbde80 fix indent on help text so lines don't wrap over 80 chars. Also fix mypy uncovered value assignment issue. 2026-03-06 15:50:52 -08:00
Kelly Brazil
936432d879 Fix parsing blank target in verbose output #675 2026-03-06 12:59:16 -08:00
Kelly Brazil
51543437d7 remove comment 2026-02-27 14:57:59 -08:00
Kelly Brazil
dd9229f161 Merge branch 'dev' of https://github.com/kellyjonbrazil/jc into dev 2026-02-27 14:55:50 -08:00
Kelly Brazil
3d9554baec force tests 2026-02-27 14:55:45 -08:00
Kelly Brazil
e33a81269c update os matrix and python versions for tests 2026-02-27 14:54:15 -08:00
Kelly Brazil
f3352352ed fix unknown flags throwing key error (#681) 2026-02-27 12:08:08 -08:00
Kelly Brazil
1c0a35dff8 version bump 2026-02-27 11:37:58 -08:00
jylenhof
8ba75794a6 docs(README): add mise alternate installation documentation
Signed-off-by: jylenhof <jygithub@lenhof.eu.org>
2026-01-10 11:07:10 +01:00
Ivan Pozdeev
77af5ac9d3 Revert ".gitignore: + Pycharm metadata"
This reverts commit 0363ddcc6a.
2025-12-16 03:26:09 +03:00
Ivan Pozdeev
4067bfed9f Support friendly names for modes; rename files and tests to indicate non-default modes 2025-12-16 02:43:16 +03:00
Ivan Pozdeev
0363ddcc6a .gitignore: + Pycharm metadata 2025-12-16 02:07:14 +03:00
Ivan Pozdeev
04303efa75 Fix tests 2025-12-16 01:28:18 +03:00
Ivan Pozdeev
956f74358b Add tests for other mode indicators 2025-12-15 23:42:13 +03:00
Ivan Pozdeev
4fe3377029 Support other mode symbols -- e.g. 'U' and '^' from Perl shasum
Accept any symbol for future-proofing
2025-12-15 23:31:16 +03:00
Ivan Pozdeev
1944a7145e Fix erroneous redundant find&replace 2025-12-11 07:23:53 +03:00
Ivan Pozdeev
3d698e50c0 fix 2025-12-11 07:12:55 +03:00
Ivan Pozdeev
5945ded816 Fix incompatibility with 3.6; more specific regex 2025-12-11 07:10:36 +03:00
Ivan Pozdeev
ecd0e03c66 Parse the mode indicator in *sum output
In accordance with shasum(1) manpage
For legacy md5, set it to blank
2025-12-11 07:01:26 +03:00
61 changed files with 1254 additions and 178 deletions

View File

@@ -9,69 +9,14 @@ on:
- "**/*.py"
jobs:
very_old_python:
if: github.event.pull_request.draft == false
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [macos-13, windows-2022]
python-version: ["3.6"]
steps:
- uses: actions/checkout@v3
- name: "Set up timezone to America/Los_Angeles"
uses: szenius/set-timezone@v1.2
with:
timezoneLinux: "America/Los_Angeles"
timezoneMacos: "America/Los_Angeles"
timezoneWindows: "Pacific Standard Time"
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Test with unittest
run: |
python -m unittest discover tests
old_python:
if: github.event.pull_request.draft == false
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [macos-13, ubuntu-22.04, windows-2022]
python-version: ["3.7", "3.8", "3.9", "3.10"]
steps:
- uses: actions/checkout@v3
- name: "Set up timezone to America/Los_Angeles"
uses: szenius/set-timezone@v1.2
with:
timezoneLinux: "America/Los_Angeles"
timezoneMacos: "America/Los_Angeles"
timezoneWindows: "Pacific Standard Time"
- name: Set up Python ${{ matrix.python-version }}
uses: actions/setup-python@v4
with:
python-version: ${{ matrix.python-version }}
- name: Install dependencies
run: |
python -m pip install --upgrade pip
pip install -r requirements.txt
- name: Test with unittest
run: |
python -m unittest discover tests
latest_python:
if: github.event.pull_request.draft == false
runs-on: ${{ matrix.os }}
strategy:
matrix:
os: [macos-latest, ubuntu-latest, windows-latest]
python-version: ["3.11", "3.12"]
os: [macos-15-intel, macos-latest, ubuntu-latest, ubuntu-24.04-arm, windows-latest]
python-version: ["3.11", "3.12", "3.13", "3.14"]
steps:
- uses: actions/checkout@v3
@@ -92,3 +37,59 @@ jobs:
- name: Test with unittest
run: |
python -m unittest discover tests
# very_old_python:
# if: github.event.pull_request.draft == false
# runs-on: ${{ matrix.os }}
# strategy:
# matrix:
# os: [macos-13, windows-2022]
# python-version: ["3.6"]
# steps:
# - uses: actions/checkout@v3
# - name: "Set up timezone to America/Los_Angeles"
# uses: szenius/set-timezone@v1.2
# with:
# timezoneLinux: "America/Los_Angeles"
# timezoneMacos: "America/Los_Angeles"
# timezoneWindows: "Pacific Standard Time"
# - name: Set up Python ${{ matrix.python-version }}
# uses: actions/setup-python@v4
# with:
# python-version: ${{ matrix.python-version }}
# - name: Install dependencies
# run: |
# python -m pip install --upgrade pip
# pip install -r requirements.txt
# - name: Test with unittest
# run: |
# python -m unittest discover tests
# old_python:
# if: github.event.pull_request.draft == false
# runs-on: ${{ matrix.os }}
# strategy:
# matrix:
# os: [macos-13, ubuntu-22.04, windows-2022]
# python-version: ["3.7", "3.8", "3.9", "3.10"]
# steps:
# - uses: actions/checkout@v3
# - name: "Set up timezone to America/Los_Angeles"
# uses: szenius/set-timezone@v1.2
# with:
# timezoneLinux: "America/Los_Angeles"
# timezoneMacos: "America/Los_Angeles"
# timezoneWindows: "Pacific Standard Time"
# - name: Set up Python ${{ matrix.python-version }}
# uses: actions/setup-python@v4
# with:
# python-version: ${{ matrix.python-version }}
# - name: Install dependencies
# run: |
# python -m pip install --upgrade pip
# pip install -r requirements.txt
# - name: Test with unittest
# run: |
# python -m unittest discover tests

View File

@@ -1,6 +1,17 @@
jc changelog
20251012 v1.25.6
20260330 v1.25.7
- Add `typeset` and `declare` Bash internal command parser to convert variables,
simple arrays, and associative arrays along with object metadata
- Enhance `pip-show` command parser to add `-f` show files support
- Enhance `rsync` and `rsync-s` parsers to add `--stats` or `--info=stats[1-3]` fields
- Fix `hashsum` command parser to correctly parse the `mode` indicator
- Fix `dir` command parser for incorrect stripping of the `D:` drive letter
- Fix `proc-pid-smaps` proc parser when unknown VmFlags are output
- Fix `ifconfig` command parser for incorrect stripping of leading zeros in some hex numbers
- Fix `iptables` command parser when Target is blank and verbose output is used
20251012 v1.25.6
- Add `net-localgroup` Windows command parser
- Add `net-user` Windows command parser
- Add `route-print` Windows command parser

View File

@@ -123,6 +123,7 @@ pip3 install jc
| FreeBSD | `portsnap fetch update && cd /usr/ports/textproc/py-jc && make install clean` |
| Ansible filter plugin | `ansible-galaxy collection install community.general` |
| FortiSOAR connector | Install from FortiSOAR Connector Marketplace |
| Mise-en-place (Linux/MacOS) | `mise use -g jc@latest` |
> For more OS Packages, see https://repology.org/project/jc/versions.

View File

@@ -300,8 +300,8 @@ class JcCli():
Pages the parser documentation if a parser is found in the arguments,
otherwise the general help text is printed.
"""
self.indent = 4
self.pad = 22
self.indent = 2
self.pad = 21
if self.show_categories:
utils._safe_print(self.parser_categories_text())
@@ -569,7 +569,11 @@ class JcCli():
if self.debug:
raise
error_msg = os.strerror(e.errno)
if e.errno:
error_msg = os.strerror(e.errno)
else:
error_msg = "no further information provided"
utils.error_message([
f'"{file}" file could not be opened: {error_msg}.'
])
@@ -594,7 +598,11 @@ class JcCli():
if self.debug:
raise
error_msg = os.strerror(e.errno)
if e.errno:
error_msg = os.strerror(e.errno)
else:
error_msg = "no further information provided"
utils.error_message([
f'"{self.magic_run_command_str}" command could not be run: {error_msg}.'
])

View File

@@ -62,52 +62,52 @@ jc converts the output of many commands, file-types, and strings to JSON or YAML
Usage:
Standard syntax:
Standard syntax:
COMMAND | jc [SLICE] [OPTIONS] PARSER
COMMAND | jc [SLICE] [OPTIONS] PARSER
cat FILE | jc [SLICE] [OPTIONS] PARSER
cat FILE | jc [SLICE] [OPTIONS] PARSER
echo STRING | jc [SLICE] [OPTIONS] PARSER
echo STRING | jc [SLICE] [OPTIONS] PARSER
Magic syntax:
Magic syntax:
jc [SLICE] [OPTIONS] COMMAND
jc [SLICE] [OPTIONS] COMMAND
jc [SLICE] [OPTIONS] /proc/<path-to-procfile>
jc [SLICE] [OPTIONS] /proc/<path-to-procfile>
Parsers:
'''
slicetext_string: str = '''\
Slice:
[start]:[end]
[start]:[end]
start: [[-]index] - Zero-based start line, negative index for
counting from the end
start: [[-]index] - Zero-based start line, negative index for
counting from the end
end: [[-]index] - Zero-based end line (excluding the index),
negative index for counting from the end
end: [[-]index] - Zero-based end line (excluding the index),
negative index for counting from the end
'''
helptext_end_string: str = '''\
Examples:
Standard Syntax:
$ dig www.google.com | jc --pretty --dig
$ cat /proc/meminfo | jc --pretty --proc
Standard Syntax:
$ dig www.google.com | jc --pretty --dig
$ cat /proc/meminfo | jc --pretty --proc
Magic Syntax:
$ jc --pretty dig www.google.com
$ jc --pretty /proc/meminfo
Magic Syntax:
$ jc --pretty dig www.google.com
$ jc --pretty /proc/meminfo
Line Slicing:
$ cat output.txt | jc 4:15 --<PARSER> # Parse from line 4 to 14
# with <PARSER> (zero-based)
Line Slicing:
$ cat output.txt | jc 4:15 --<PARSER> # Parse from line 4 to 14
# with <PARSER> (zero-based)
Parser Documentation:
$ jc --help --dig
Parser Documentation:
$ jc --help --dig
More Help:
$ jc -hh # show hidden parsers
$ jc -hhh # list parsers by category tags
More Help:
$ jc -hh # show hidden parsers
$ jc -hhh # list parsers by category tags
'''

View File

@@ -10,7 +10,7 @@ from jc import appdirs
from jc import utils
__version__ = '1.25.6'
__version__ = '1.25.7'
parsers: List[str] = [
'acpi',
@@ -216,6 +216,7 @@ parsers: List[str] = [
'traceroute',
'traceroute-s',
'tune2fs',
'typeset',
'udevadm',
'ufw',
'ufw-appinfo',

View File

@@ -121,7 +121,7 @@ import jc.utils
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '1.6'
version = '1.7'
description = '`dir` command parser'
author = 'Rasheed Elsaleh'
author_email = 'rasheed@rebelliondefense.com'
@@ -183,8 +183,8 @@ def parse(data, raw=False, quiet=False):
if jc.utils.has_data(data):
for line in data.splitlines():
if line.startswith(" Directory of"):
parent_dir = line.lstrip(" Directory of ")
if line.startswith(" Directory of "):
parent_dir = line[len(" Directory of "):]
continue
# skip lines that don't start with a date
if not re.match(r'^\d{2}/\d{2}/\d{4}', line):

View File

@@ -28,6 +28,7 @@ Schema:
[
{
"filename": string,
"mode": string,
"hash": string,
}
]
@@ -38,37 +39,44 @@ Examples:
[
{
"filename": "devtoolset-3-gcc-4.9.2-6.el7.x86_64.rpm",
"mode": "text",
"hash": "65fc958c1add637ec23c4b137aecf3d3"
},
{
"filename": "digout",
"mode": "text",
"hash": "5b9312ee5aff080927753c63a347707d"
},
{
"filename": "dmidecode.out",
"mode": "text",
"hash": "716fd11c2ac00db109281f7110b8fb9d"
},
{
"filename": "file with spaces in the name",
"mode": "text",
"hash": "d41d8cd98f00b204e9800998ecf8427e"
},
{
"filename": "id-centos.out",
"mode": "text",
"hash": "4295be239a14ad77ef3253103de976d2"
},
{
"filename": "ifcfg.json",
"mode": "text",
"hash": "01fda0d9ba9a75618b072e64ff512b43"
},
...
]
"""
import re
import jc.utils
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '1.2'
version = '1.3'
description = 'hashsum command parser (`md5sum`, `shasum`, etc.)'
author = 'Kelly Brazil'
author_email = 'kellyjonbrazil@gmail.com'
@@ -81,6 +89,15 @@ class info():
__version__ = info.version
_mode_friendly_names = {
" ": "text",
"*": "binary",
# Perl shasum -- specific
"U": "universal",
"^": "bits",
# BSD-style format only supports binary mode
None: "binary"
}
def _process(proc_data):
"""
@@ -95,7 +112,9 @@ def _process(proc_data):
List of Dictionaries. Structured data to conform to the schema.
"""
# no further processing for this parser
for entry in proc_data:
entry['mode'] = _mode_friendly_names.get(entry['mode'],entry['mode'])
return proc_data
@@ -127,18 +146,20 @@ def parse(data, raw=False, quiet=False):
file_name = line.split('=', maxsplit=1)[0].strip()
file_name = file_name[5:]
file_name = file_name[:-1]
# filler, legacy md5 always uses binary mode
file_mode = None
# standard md5sum and shasum command output
else:
file_hash = line.split(maxsplit=1)[0]
file_name = line.split(maxsplit=1)[1]
m = re.match('([0-9a-f]+) (.)(.*)$', line)
if not m:
raise ValueError(f'Invalid line format: "{line}"')
file_hash, file_mode, file_name = m.groups()
item = {
'filename': file_name,
'mode': file_mode,
'hash': file_hash
}
raw_output.append(item)
if raw:
return raw_output
else:
return _process(raw_output)
return raw_output if raw else _process(raw_output)

View File

@@ -219,7 +219,7 @@ import jc.utils
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '2.4'
version = '2.5'
description = '`ifconfig` command parser'
author = 'Kelly Brazil'
author_email = 'kellyjonbrazil@gmail.com'
@@ -264,7 +264,7 @@ def _process(proc_data: List[JSONDictType]) -> List[JSONDictType]:
try:
if entry['ipv4_mask'].startswith('0x'):
new_mask = entry['ipv4_mask']
new_mask = new_mask.lstrip('0x')
new_mask = new_mask[2:]
new_mask = '.'.join(str(int(i, 16)) for i in [new_mask[i:i + 2] for i in range(0, len(new_mask), 2)])
entry['ipv4_mask'] = new_mask
except (ValueError, TypeError, AttributeError):
@@ -289,7 +289,7 @@ def _process(proc_data: List[JSONDictType]) -> List[JSONDictType]:
try:
if ip_address['mask'].startswith('0x'):
new_mask = ip_address['mask']
new_mask = new_mask.lstrip('0x')
new_mask = new_mask[2:]
new_mask = '.'.join(str(int(i, 16)) for i in [new_mask[i:i + 2] for i in range(0, len(new_mask), 2)])
ip_address['mask'] = new_mask
except (ValueError, TypeError, AttributeError):

View File

@@ -173,7 +173,7 @@ import jc.utils
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '1.12'
version = '1.13'
description = '`iptables` command parser'
author = 'Kelly Brazil'
author_email = 'kellyjonbrazil@gmail.com'
@@ -294,9 +294,16 @@ def parse(data, raw=False, quiet=False):
else:
# sometimes the "target" column is blank. Stuff in a dummy character
if headers[0] == 'target' and line.startswith(' '):
opt_values = {'--', '-f', '!f'}
line_split = line.split()
if headers[0] == 'target' and line.startswith(' '): # standard output
line = '\u2063' + line
elif headers[0] == 'pkts' and line_split[3] in opt_values: # verbose output
first_section = line_split[:2]
second_section = line_split[2:]
line = ' '.join(first_section) + ' \u2063 ' + ' '.join(second_section)
rule = line.split(maxsplit=len(headers) - 1)
temp_rule = dict(zip(headers, rule))
if temp_rule:

View File

@@ -26,7 +26,10 @@ Schema:
"license": string,
"location": string,
"requires": string,
"required_by": string
"required_by": string,
"files": [
string
]
}
]
@@ -60,13 +63,13 @@ Examples:
}
]
"""
from typing import List, Dict, Optional
from typing import List, Dict
import jc.utils
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '1.5'
version = '1.6'
description = '`pip show` command parser'
author = 'Kelly Brazil'
author_email = 'kellyjonbrazil@gmail.com'
@@ -120,6 +123,22 @@ def parse(
last_key: str = ''
last_key_data: List = []
def flush_last_key_data() -> None:
"""Append buffered continuation lines to the previous field."""
nonlocal last_key_data
if not last_key_data:
return
if last_key == 'files':
package[last_key].extend(last_key_data)
else:
if not isinstance(package[last_key], str):
package[last_key] = ''
package[last_key] = package[last_key] + '\n' + '\n'.join(last_key_data)
last_key_data = []
# Clear any blank lines
cleandata = list(filter(None, data.splitlines()))
@@ -127,8 +146,7 @@ def parse(
for row in cleandata:
if row.startswith('---'):
if last_key_data:
package[last_key] = package[last_key] + '\n' + '\n'.join(last_key_data)
flush_last_key_data()
raw_output.append(package)
package = {}
@@ -137,17 +155,17 @@ def parse(
continue
if not row.startswith(' '):
item_key = row.split(': ', maxsplit=1)[0].lower().replace('-', '_')
item_value: Optional[str] = row.split(': ', maxsplit=1)[1]
item_key, item_value = row.split(':', maxsplit=1)
item_key = item_key.lower().replace('-', '_')
item_value = item_value.lstrip()
if item_value == '':
if item_key == 'files':
item_value = []
elif item_value == '':
item_value = None
if last_key_data and last_key != item_key:
if not isinstance(package[last_key], str):
package[last_key] = ''
package[last_key] = package[last_key] + '\n' + '\n'.join(last_key_data)
last_key_data = []
flush_last_key_data()
package[item_key] = item_value
last_key = item_key
@@ -158,8 +176,7 @@ def parse(
continue
if package:
if last_key_data:
package[last_key] = package[last_key] + '\n' + '\n'.join(last_key_data)
flush_last_key_data()
raw_output.append(package)

View File

@@ -168,7 +168,7 @@ import jc.utils
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '1.0'
version = '1.1'
description = '`/proc/<pid>/smaps` file parser'
author = 'Kelly Brazil'
author_email = 'kellyjonbrazil@gmail.com'
@@ -205,33 +205,46 @@ def _process(proc_data: List[Dict]) -> List[Dict]:
vmflags_map = {
'rd': 'readable',
'wr': 'writeable',
'ex': 'executable',
'sh': 'shared',
'mr': 'may read',
'mw': 'may write',
'me': 'may execute',
'ms': 'may share',
'mp': 'MPX-specific VMA',
'gd': 'stack segment growns down',
'pf': 'pure PFN range',
'dw': 'disabled write to the mapped file',
'lo': 'pages are locked in memory',
'io': 'memory mapped I/O area',
'sr': 'sequential read advise provided',
'rr': 'random read advise provided',
'dc': 'do not copy area on fork',
'de': 'do not expand area on remapping',
'ac': 'area is accountable',
'nr': 'swap space is not reserved for the area',
'ht': 'area uses huge tlb pages',
'ar': 'architecture specific flag',
'dd': 'do not include area into core dump',
'sd': 'soft-dirty flag',
'mm': 'mixed map area',
'hg': 'huge page advise flag',
'nh': 'no-huge page advise flag',
'mg': 'mergable advise flag'
'wr': 'writeable',
'ex': 'executable',
'sh': 'shared',
'mr': 'may read',
'mw': 'may write',
'me': 'may execute',
'ms': 'may share',
'mp': 'MPX-specific VMA',
'gd': 'stack segment growns down',
'pf': 'pure PFN range',
'dw': 'disabled write to the mapped file',
'lo': 'pages are locked in memory',
'io': 'memory mapped I/O area',
'sr': 'sequential read advise provided',
'rr': 'random read advise provided',
'dc': 'do not copy area on fork',
'de': 'do not expand area on remapping',
'ac': 'area is accountable',
'nr': 'swap space is not reserved for the area',
'ht': 'area uses huge tlb pages',
'sf': 'perform synchronous page faults',
'nl': 'non-linear mapping',
'ar': 'architecture specific flag',
'wf': 'wipe on fork',
'dd': 'do not include area into core dump',
'sd': 'soft-dirty flag',
'mm': 'mixed map area',
'hg': 'huge page advise flag',
'nh': 'no-huge page advise flag',
'mg': 'mergable advise flag',
'bt': 'arm64 BTI guarded page',
'mt': 'arm64 MTE allocation tags are enabled',
'um': 'userfaultfd missing pages tracking',
'uw': 'userfaultfd wprotect pages tracking',
'ui': 'userfaultfd minor fault',
'ss': 'shadow/guarded control stack page',
'sl': 'sealed',
'lf': 'lock on fault pages',
'dp': 'always lazily freeable mapping',
'gu': 'maybe contains guard regions'
}
for entry in proc_data:
@@ -245,7 +258,7 @@ def _process(proc_data: List[Dict]) -> List[Dict]:
if 'VmFlags' in entry:
entry['VmFlags'] = entry['VmFlags'].split()
entry['VmFlags_pretty'] = [vmflags_map[x] for x in entry['VmFlags']]
entry['VmFlags_pretty'] = [vmflags_map.get(x, x) for x in entry['VmFlags']]
return proc_data

View File

@@ -4,6 +4,8 @@ Supports the `-i` or `--itemize-changes` options with all levels of
verbosity. This parser will process the `STDOUT` output or a log file
generated with the `--log-file` option.
The `--stats` or `--info=stats[1-3]` options are also supported.
Usage (cli):
$ rsync -i -a source/ dest | jc --rsync
@@ -37,7 +39,21 @@ Schema:
"false_alarms": integer,
"data": integer,
"bytes_sec": float,
"speedup": float
"speedup": float,
"total_files": integer,
"regular_files": integer,
"dir_files": integer,
"total_created_files": integer,
"created_regular_files": integer,
"created_dir_files": integer,
"deleted_files": integer,
"transferred_files": integer,
"transferred_file_size": integer,
"literal_data": integer,
"matched_data": integer,
"file_list_size": integer,
"file_list_generation_time": float,
"file_list_transfer_time": float,
},
"files": [
{
@@ -62,6 +78,8 @@ Schema:
}
]
Size values are in bytes.
[0] 'file sent', 'file received', 'local change or creation',
'hard link', 'not updated', 'message'
[1] 'file', 'directory', 'symlink', 'device', 'special file'
@@ -137,7 +155,7 @@ import jc.utils
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '1.2'
version = '1.3'
description = '`rsync` command parser'
author = 'Kelly Brazil'
author_email = 'kellyjonbrazil@gmail.com'
@@ -163,10 +181,16 @@ def _process(proc_data: List[Dict]) -> List[Dict]:
"""
int_list = {
'process', 'sent', 'received', 'total_size', 'matches', 'hash_hits',
'false_alarms', 'data'
'false_alarms', 'data', 'total_files', 'regular_files', 'dir_files',
'total_created_files', 'created_regular_files', 'created_dir_files',
'deleted_files', 'transferred_files', 'transferred_file_size',
'literal_data', 'matched_data', 'file_list_size'
}
float_list = {'bytes_sec', 'speedup'}
float_list = {
'bytes_sec', 'speedup', 'file_list_generation_time',
'file_list_transfer_time'
}
for item in proc_data:
for key in item['summary']:
@@ -338,6 +362,17 @@ def parse(
stat2_line_log_v_re = re.compile(r'(?P<date>\d\d\d\d/\d\d/\d\d)\s+(?P<time>\d\d:\d\d:\d\d)\s+\[(?P<process>\d+)\]\s+sent\s+(?P<sent>[\d,]+)\s+bytes\s+received\s+(?P<received>[\d,]+)\s+bytes\s+(?P<bytes_sec>[\d,.]+)\s+bytes/sec')
stat3_line_log_v_re = re.compile(r'(?P<date>\d\d\d\d/\d\d/\d\d)\s+(?P<time>\d\d:\d\d:\d\d)\s+\[(?P<process>\d+)]\s+total\s+size\s+is\s+(?P<total_size>[\d,]+)\s+speedup\s+is\s+(?P<speedup>[\d,.]+)')
stat_ex_files_number_re = re.compile(r'Number\sof\sfiles:\s(?P<files_total>[,0123456789]+)\s\(reg:\s(?P<files_regular>[,0123456789]+),\sdir:\s(?P<files_dir>[,0123456789]+)\)$')
stat_ex_files_created_re = re.compile(r'Number\sof\screated\sfiles:\s(?P<files_created_total>[,0123456789]+)\s\(reg:\s(?P<files_created_regular>[,0123456789]+),\sdir:\s(?P<files_created_dir>[,0123456789]+)\)$')
stat_ex_files_deleted_re = re.compile(r'Number\sof\sdeleted\sfiles:\s(?P<files_deleted>[,0123456789]+)$')
stat_ex_files_transferred_re = re.compile(r'Number\sof\sregular\sfiles\stransferred:\s(?P<files_transferred>[,0123456789]+)$')
stat_ex_files_transferred_size_re = re.compile(r'Total\sfile\ssize:\s(?P<files_transferred_size>[,.0123456789]+\S?)\sbytes$')
stat_ex_literal_data_re = re.compile(r'Literal\sdata:\s(?P<literal_data>[,.0123456789]+\S?)\sbytes$')
stat_ex_matched_data_re = re.compile(r'Matched\sdata:\s(?P<matched_data>[,.0123456789]+\S?)\sbytes$')
stat_ex_file_list_size_re = re.compile(r'File\slist\ssize:\s(?P<file_list_size>[,.0123456789]+\S?)$')
stat_ex_file_list_generation_time_re = re.compile(r'File\slist\sgeneration\stime:\s(?P<file_list_generation_time>[,.0123456789]+\S?)\sseconds$')
stat_ex_file_list_transfer_time_re = re.compile(r'File\slist\stransfer\stime:\s(?P<file_list_transfer_time>[,.0123456789]+\S?)\sseconds$')
if jc.utils.has_data(data):
for line in filter(None, data.splitlines()):
@@ -451,11 +486,11 @@ def parse(
stat1_line = stat1_line_re.match(line)
if stat1_line:
rsync_run['summary'] = {
rsync_run['summary'].update({
'sent': stat1_line.group('sent'),
'received': stat1_line.group('received'),
'bytes_sec': stat1_line.group('bytes_sec')
}
})
continue
stat2_line = stat2_line_re.match(line)
@@ -466,11 +501,11 @@ def parse(
stat1_line_simple = stat1_line_simple_re.match(line)
if stat1_line_simple:
rsync_run['summary'] = {
rsync_run['summary'].update({
'sent': stat1_line_simple.group('sent'),
'received': stat1_line_simple.group('received'),
'bytes_sec': stat1_line_simple.group('bytes_sec')
}
})
continue
stat2_line_simple = stat2_line_simple_re.match(line)
@@ -481,19 +516,19 @@ def parse(
stat_line_log = stat_line_log_re.match(line)
if stat_line_log:
rsync_run['summary'] = {
rsync_run['summary'].update({
'date': stat_line_log.group('date'),
'time': stat_line_log.group('time'),
'process': stat_line_log.group('process'),
'sent': stat_line_log.group('sent'),
'received': stat_line_log.group('received'),
'total_size': stat_line_log.group('total_size')
}
})
continue
stat1_line_log_v = stat1_line_log_v_re.match(line)
if stat1_line_log_v:
rsync_run['summary'] = {
rsync_run['summary'].update({
'date': stat1_line_log_v.group('date'),
'time': stat1_line_log_v.group('time'),
'process': stat1_line_log_v.group('process'),
@@ -501,7 +536,7 @@ def parse(
'hash_hits': stat1_line_log_v.group('hash_hits'),
'false_alarms': stat1_line_log_v.group('false_alarms'),
'data': stat1_line_log_v.group('data')
}
})
continue
stat2_line_log_v = stat2_line_log_v_re.match(line)
@@ -517,6 +552,61 @@ def parse(
rsync_run['summary']['speedup'] = stat3_line_log_v.group('speedup')
continue
# extra stats lines when using rsync --stats or --info=stats[1-3]
stat_ex_files_number_v = stat_ex_files_number_re.match(line)
if stat_ex_files_number_v:
rsync_run['summary']['total_files'] = stat_ex_files_number_v.group('files_total')
rsync_run['summary']['regular_files'] = stat_ex_files_number_v.group('files_regular')
rsync_run['summary']['dir_files'] = stat_ex_files_number_v.group('files_dir')
continue
stat_ex_files_created_v = stat_ex_files_created_re.match(line)
if stat_ex_files_created_v:
rsync_run['summary']['total_created_files'] = stat_ex_files_created_v.group('files_created_total')
rsync_run['summary']['created_regular_files'] = stat_ex_files_created_v.group('files_created_regular')
rsync_run['summary']['created_dir_files'] = stat_ex_files_created_v.group('files_created_dir')
continue
stat_ex_files_deleted_v = stat_ex_files_deleted_re.match(line)
if stat_ex_files_deleted_v:
rsync_run['summary']['deleted_files'] = stat_ex_files_deleted_v.group('files_deleted')
continue
stat_ex_files_transferred_v = stat_ex_files_transferred_re.match(line)
if stat_ex_files_transferred_v:
rsync_run['summary']['transferred_files'] = stat_ex_files_transferred_v.group('files_transferred')
continue
stat_ex_files_transferred_size_v = stat_ex_files_transferred_size_re.match(line)
if stat_ex_files_transferred_size_v:
rsync_run['summary']['transferred_file_size'] = stat_ex_files_transferred_size_v.group('files_transferred_size')
continue
stat_ex_literal_data_v = stat_ex_literal_data_re.match(line)
if stat_ex_literal_data_v:
rsync_run['summary']['literal_data'] = stat_ex_literal_data_v.group('literal_data')
continue
stat_ex_matched_data_v = stat_ex_matched_data_re.match(line)
if stat_ex_matched_data_v:
rsync_run['summary']['matched_data'] = stat_ex_matched_data_v.group('matched_data')
continue
stat_ex_file_list_size_v = stat_ex_file_list_size_re.match(line)
if stat_ex_file_list_size_v:
rsync_run['summary']['file_list_size'] = stat_ex_file_list_size_v.group('file_list_size')
continue
stat_ex_file_list_generation_time_v = stat_ex_file_list_generation_time_re.match(line)
if stat_ex_file_list_generation_time_v:
rsync_run['summary']['file_list_generation_time'] = stat_ex_file_list_generation_time_v.group('file_list_generation_time')
continue
stat_ex_file_list_transfer_time_v = stat_ex_file_list_transfer_time_re.match(line)
if stat_ex_file_list_transfer_time_v:
rsync_run['summary']['file_list_transfer_time'] = stat_ex_file_list_transfer_time_v.group('file_list_transfer_time')
continue
raw_output.append(rsync_run)
# cleanup blank entries

View File

@@ -7,6 +7,8 @@ Supports the `-i` or `--itemize-changes` options with all levels of
verbosity. This parser will process the `STDOUT` output or a log file
generated with the `--log-file` option.
The `--stats` or `--info=stats[1-3]` options are also supported.
Usage (cli):
$ rsync -i -a source/ dest | jc --rsync-s
@@ -64,6 +66,8 @@ Schema:
}
}
Size values are in bytes.
[0] 'file sent', 'file received', 'local change or creation',
'hard link', 'not updated', 'message'
[1] 'file', 'directory', 'symlink', 'device', 'special file'
@@ -88,7 +92,7 @@ from jc.streaming import (
class info():
"""Provides parser metadata (version, author, etc.)"""
version = '1.3'
version = '1.4'
description = '`rsync` command streaming parser'
author = 'Kelly Brazil'
author_email = 'kellyjonbrazil@gmail.com'
@@ -114,10 +118,16 @@ def _process(proc_data: Dict) -> Dict:
"""
int_list = {
'process', 'sent', 'received', 'total_size', 'matches', 'hash_hits',
'false_alarms', 'data'
'false_alarms', 'data', 'total_files', 'regular_files', 'dir_files',
'total_created_files', 'created_regular_files', 'created_dir_files',
'deleted_files', 'transferred_files', 'transferred_file_size',
'literal_data', 'matched_data', 'file_list_size'
}
float_list = {'bytes_sec', 'speedup'}
float_list = {
'bytes_sec', 'speedup', 'file_list_generation_time',
'file_list_transfer_time'
}
for key in proc_data.copy():
if key in int_list:
@@ -281,6 +291,17 @@ def parse(
stat2_line_log_v_re = re.compile(r'(?P<date>\d\d\d\d/\d\d/\d\d)\s+(?P<time>\d\d:\d\d:\d\d)\s+\[(?P<process>\d+)\]\s+sent\s+(?P<sent>[\d,]+)\s+bytes\s+received\s+(?P<received>[\d,]+)\s+bytes\s+(?P<bytes_sec>[\d,.]+)\s+bytes/sec')
stat3_line_log_v_re = re.compile(r'(?P<date>\d\d\d\d/\d\d/\d\d)\s+(?P<time>\d\d:\d\d:\d\d)\s+\[(?P<process>\d+)]\s+total\s+size\s+is\s+(?P<total_size>[\d,]+)\s+speedup\s+is\s+(?P<speedup>[\d,.]+)')
stat_ex_files_number_re = re.compile(r'Number\sof\sfiles:\s(?P<files_total>[,0123456789]+)\s\(reg:\s(?P<files_regular>[,0123456789]+),\sdir:\s(?P<files_dir>[,0123456789]+)\)$')
stat_ex_files_created_re = re.compile(r'Number\sof\screated\sfiles:\s(?P<files_created_total>[,0123456789]+)\s\(reg:\s(?P<files_created_regular>[,0123456789]+),\sdir:\s(?P<files_created_dir>[,0123456789]+)\)$')
stat_ex_files_deleted_re = re.compile(r'Number\sof\sdeleted\sfiles:\s(?P<files_deleted>[,0123456789]+)$')
stat_ex_files_transferred_re = re.compile(r'Number\sof\sregular\sfiles\stransferred:\s(?P<files_transferred>[,0123456789]+)$')
stat_ex_files_transferred_size_re = re.compile(r'Total\sfile\ssize:\s(?P<files_transferred_size>[,.0123456789]+\S?)\sbytes$')
stat_ex_literal_data_re = re.compile(r'Literal\sdata:\s(?P<literal_data>[,.0123456789]+\S?)\sbytes$')
stat_ex_matched_data_re = re.compile(r'Matched\sdata:\s(?P<matched_data>[,.0123456789]+\S?)\sbytes$')
stat_ex_file_list_size_re = re.compile(r'File\slist\ssize:\s(?P<file_list_size>[,.0123456789]+\S?)$')
stat_ex_file_list_generation_time_re = re.compile(r'File\slist\sgeneration\stime:\s(?P<file_list_generation_time>[,.0123456789]+\S?)\sseconds$')
stat_ex_file_list_transfer_time_re = re.compile(r'File\slist\stransfer\stime:\s(?P<file_list_transfer_time>[,.0123456789]+\S?)\sseconds$')
for line in data:
try:
streaming_line_input_type_check(line)
@@ -408,12 +429,12 @@ def parse(
stat1_line = stat1_line_re.match(line)
if stat1_line:
summary = {
summary.update({
'type': 'summary',
'sent': stat1_line.group('sent'),
'received': stat1_line.group('received'),
'bytes_sec': stat1_line.group('bytes_sec')
}
})
continue
stat2_line = stat2_line_re.match(line)
@@ -424,12 +445,12 @@ def parse(
stat1_line_simple = stat1_line_simple_re.match(line)
if stat1_line_simple:
summary = {
summary.update({
'type': 'summary',
'sent': stat1_line_simple.group('sent'),
'received': stat1_line_simple.group('received'),
'bytes_sec': stat1_line_simple.group('bytes_sec')
}
})
continue
stat2_line_simple = stat2_line_simple_re.match(line)
@@ -440,7 +461,7 @@ def parse(
stat_line_log = stat_line_log_re.match(line)
if stat_line_log:
summary = {
summary.update({
'type': 'summary',
'date': stat_line_log.group('date'),
'time': stat_line_log.group('time'),
@@ -448,12 +469,12 @@ def parse(
'sent': stat_line_log.group('sent'),
'received': stat_line_log.group('received'),
'total_size': stat_line_log.group('total_size')
}
})
continue
stat1_line_log_v = stat1_line_log_v_re.match(line)
if stat1_line_log_v:
summary = {
summary.update({
'type': 'summary',
'date': stat1_line_log_v.group('date'),
'time': stat1_line_log_v.group('time'),
@@ -462,7 +483,7 @@ def parse(
'hash_hits': stat1_line_log_v.group('hash_hits'),
'false_alarms': stat1_line_log_v.group('false_alarms'),
'data': stat1_line_log_v.group('data')
}
})
continue
stat2_line_log_v = stat2_line_log_v_re.match(line)
@@ -478,6 +499,61 @@ def parse(
summary['speedup'] = stat3_line_log_v.group('speedup')
continue
# extra stats lines when using rsync --stats or --info=stats[1-3]
stat_ex_files_number_v = stat_ex_files_number_re.match(line)
if stat_ex_files_number_v:
summary['total_files'] = stat_ex_files_number_v.group('files_total')
summary['regular_files'] = stat_ex_files_number_v.group('files_regular')
summary['dir_files'] = stat_ex_files_number_v.group('files_dir')
continue
stat_ex_files_created_v = stat_ex_files_created_re.match(line)
if stat_ex_files_created_v:
summary['total_created_files'] = stat_ex_files_created_v.group('files_created_total')
summary['created_regular_files'] = stat_ex_files_created_v.group('files_created_regular')
summary['created_dir_files'] = stat_ex_files_created_v.group('files_created_dir')
continue
stat_ex_files_deleted_v = stat_ex_files_deleted_re.match(line)
if stat_ex_files_deleted_v:
summary['deleted_files'] = stat_ex_files_deleted_v.group('files_deleted')
continue
stat_ex_files_transferred_v = stat_ex_files_transferred_re.match(line)
if stat_ex_files_transferred_v:
summary['transferred_files'] = stat_ex_files_transferred_v.group('files_transferred')
continue
stat_ex_files_transferred_size_v = stat_ex_files_transferred_size_re.match(line)
if stat_ex_files_transferred_size_v:
summary['transferred_file_size'] = stat_ex_files_transferred_size_v.group('files_transferred_size')
continue
stat_ex_literal_data_v = stat_ex_literal_data_re.match(line)
if stat_ex_literal_data_v:
summary['literal_data'] = stat_ex_literal_data_v.group('literal_data')
continue
stat_ex_matched_data_v = stat_ex_matched_data_re.match(line)
if stat_ex_matched_data_v:
summary['matched_data'] = stat_ex_matched_data_v.group('matched_data')
continue
stat_ex_file_list_size_v = stat_ex_file_list_size_re.match(line)
if stat_ex_file_list_size_v:
summary['file_list_size'] = stat_ex_file_list_size_v.group('file_list_size')
continue
stat_ex_file_list_generation_time_v = stat_ex_file_list_generation_time_re.match(line)
if stat_ex_file_list_generation_time_v:
summary['file_list_generation_time'] = stat_ex_file_list_generation_time_v.group('file_list_generation_time')
continue
stat_ex_file_list_transfer_time_v = stat_ex_file_list_transfer_time_re.match(line)
if stat_ex_file_list_transfer_time_v:
summary['file_list_transfer_time'] = stat_ex_file_list_transfer_time_v.group('file_list_transfer_time')
continue
except Exception as e:
yield raise_or_yield(ignore_exceptions, e, line)
@@ -488,3 +564,6 @@ def parse(
except Exception as e:
yield raise_or_yield(ignore_exceptions, e, '')
# unused return for Mypy
return []

335
jc/parsers/typeset.py Normal file
View File

@@ -0,0 +1,335 @@
r"""jc - JSON Convert `typeset` and `declare` Bash internal command output parser
Converts the output of the `typeset` and `declare` bash builtin commands when
run with no options or with any of the following: `-a`, `-A`, `-i`, `-l`, `-p`, `-r`, `-u`, and `-x`
Note: function parsing is not supported (e.g. `-f` or `-F`)
Usage (cli):
$ typeset | jc --typeset
Usage (module):
import jc
result = jc.parse('typeset', typeset_command_output)
Schema:
[
{
"name": string,
"value": string/integer/array/object/null, # [0]
"type": string, # [1]
"readonly": boolean/null,
"integer": boolean/null,
"lowercase": boolean/null,
"uppercase": boolean/null,
"exported": boolean/null
}
]
Key/value pairs other than `name`, `value`, and `type` will only be non-null
when the information is available from the `typeset` or `declare` output.
If declare options are not given to `jc` within the `typeset` output, then
it will assume all arrays are simple `array` type.
[0] Based on type. `variable` type is null if not set, a string when the
bash variable is set unless the `integer` field is set to `True`, then
the type is integer. `array` type is an array of strings or integers as
above. `associative` type is an object of key/value pairs where values
are strings or integers as above. Objects have the schema of:
{
"<key1>": string/integer,
"<key2>": string/integer
}
[1] Possible values: `variable`, `array`, or `associative`
Examples:
$ typeset -p | jc --typeset -p
[
{
"name": "associative_array",
"value": {
"key2": "abc",
"key3": "1 2 3",
"key1": "hello \"world\""
},
"type": "associative",
"readonly": false,
"integer": false,
"lowercase": false,
"uppercase": false,
"exported": false
},
{
"name": "integers_associative_array",
"value": {
"one": 1,
"two": 500,
"three": 999
},
"type": "associative",
"readonly": false,
"integer": true,
"lowercase": false,
"uppercase": false,
"exported": false
}
]
$ typeset -p | jc --typeset -p -r
[
{
"name": "associative_array",
"value": {
"key2": "abc",
"key3": "1 2 3",
"key1": "hello \"world\""
},
"type": "associative",
"readonly": false,
"integer": false,
"lowercase": false,
"uppercase": false,
"exported": false
},
{
"name": "integers_associative_array",
"value": {
"one": "1",
"two": "500",
"three": "999"
},
"type": "associative",
"readonly": false,
"integer": true,
"lowercase": false,
"uppercase": false,
"exported": false
}
]
"""
import shlex
import re
from typing import List, Dict
from jc.jc_types import JSONDictType
import jc.utils
class info():
    """Provides parser metadata (version, author, etc.)"""
    version = '1.0'
    description = '`typeset` and `declare` command parser'
    author = 'Kelly Brazil'
    author_email = 'kellyjonbrazil@gmail.com'
    # platforms where `typeset`/`declare` output can originate; bash is
    # available on all of these
    compatible = ['linux', 'darwin', 'cygwin', 'win32', 'aix', 'freebsd']
    tags = ['command']
__version__ = info.version

# Plain variable assignment: name=value. The value must not start with '('
# and its second character must not be '[' so that array bodies like
# `([0]="x")` fall through to the array patterns below.
# NOTE(review): `[^(][^[].+` requires a value of at least 3 characters, so
# short bare values (e.g. `COLUMNS=92`) do not match and are dropped —
# this matches the shipped fixtures; confirm it is intentional.
VAR_DEF_PATTERN = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*)=(?P<val>[^(][^[].+)$')
# Indexed (simple) array: name=([0]="a" [1]="b" ...) — keys are digits
SIMPLE_ARRAY_DEF_PATTERN = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*)=(?P<body>\(\[\d+\]=.+\))$')
# Associative array: name=([key]="val" ...) — keys are identifiers
ASSOCIATIVE_ARRAY_DEF_PATTERN = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*)=(?P<body>\(\[[a-zA-Z_][a-zA-Z0-9_]*\]=.+\))$')
# Empty array of either kind: name=()
EMPTY_ARRAY_DEF_PATTERN = re.compile(r'(?P<name>[a-zA-Z_][a-zA-Z0-9_]*)=\(\)$')
# `declare -x NAME` style line with no value assigned
EMPTY_VAR_DEF_PATTERN = re.compile(r'declare\s.+\s(?P<name>[a-zA-Z_][a-zA-Z0-9_]*)$')
# Captures the option string (e.g. `-ar`) from a `declare` prefix
DECLARE_OPTS_PATTERN = re.compile(r'declare\s(?P<options>.+?)\s[a-zA-Z_][a-zA-Z0-9_]*')
def _process(proc_data: List[JSONDictType]) -> List[JSONDictType]:
    """
    Final processing to conform to the schema.

    Parameters:

        proc_data: (List of Dictionaries) raw structured data to process

    Returns:

        List of Dictionaries. Structured to conform to the schema.
    """
    for entry in proc_data:
        # only integer-flagged entries need value conversion
        if not entry['integer']:
            continue

        value = entry['value']

        if entry['type'] == 'variable':
            entry['value'] = jc.utils.convert_to_int(value)

        elif entry['type'] == 'array' and isinstance(value, list):
            entry['value'] = [jc.utils.convert_to_int(num) for num in value]

        elif (entry['type'] == 'array' and isinstance(value, dict)) \
                or entry['type'] == 'associative':
            entry['value'] = {
                key: jc.utils.convert_to_int(val) for key, val in value.items()
            }

    return proc_data
def _get_simple_array_vals(body: str) -> List[str]:
    """Return the element values of an indexed-array body like `([0]="a" [1]="b")`."""
    inner = _remove_bookends(body)
    return [
        _remove_quotes(token.split('=', maxsplit=1)[1])
        for token in shlex.split(inner)
    ]
def _get_associative_array_vals(body: str) -> Dict[str, str]:
    """Return a key/value dict from an associative-array body like `([k]="v")`."""
    inner = _remove_bookends(body)
    result: Dict = {}
    for token in shlex.split(inner):
        raw_key, value = token.split('=', maxsplit=1)
        result[_remove_bookends(raw_key, '[', ']')] = value
    return result
def _get_declare_options(line: str, type_hint: str = 'variable') -> Dict:
    """
    Build the option-flag dictionary for one output line.

    If the line has a `declare <options>` prefix, each recognized
    single-letter flag sets its field to True, `-a`/`-A` override the
    caller-supplied type hint, and every remaining flag field is set to
    False. Lines with no declare prefix leave all flag fields as None.
    """
    flag_fields = {
        'r': 'readonly',
        'i': 'integer',
        'l': 'lowercase',
        'u': 'uppercase',
        'x': 'exported'
    }
    opts: Dict = {
        'type': type_hint,
        'readonly': None,
        'integer': None,
        'lowercase': None,
        'uppercase': None,
        'exported': None
    }

    declare_match = re.match(DECLARE_OPTS_PATTERN, line)
    if declare_match:
        flags = declare_match['options']

        for flag, field in flag_fields.items():
            if flag in flags:
                opts[field] = True

        # array flags take precedence over the hinted type
        if 'a' in flags:
            opts['type'] = 'array'
        elif 'A' in flags:
            opts['type'] = 'associative'

        # flip all remaining Nones to False
        for field, state in opts.items():
            if state is None:
                opts[field] = False

    return opts
def _remove_bookends(data: str, start_char: str = '(', end_char: str = ')') -> str:
if data.startswith(start_char) and data.endswith(end_char):
return data[1:-1]
return data
def _remove_quotes(data: str, remove_char: str ='"') -> str:
if data.startswith(remove_char) and data.endswith(remove_char):
return data[1:-1]
return data
def parse(
    data: str,
    raw: bool = False,
    quiet: bool = False
) -> List[JSONDictType]:
    """
    Main text parsing function

    Parameters:

        data:        (string)  text data to parse
        raw:         (boolean) unprocessed output if True
        quiet:       (boolean) suppress warning messages if True

    Returns:

        List of Dictionaries. Raw or processed structured data.
    """
    jc.utils.compatibility(__name__, info.compatible, quiet)
    jc.utils.input_type_check(data)

    raw_output: List[Dict] = []

    if jc.utils.has_data(data):
        # (pattern, declare-type hint, value extractor) tried in order for
        # each line; the first pattern that matches wins. VAR_DEF must be
        # tried before the array patterns and EMPTY_VAR before EMPTY_ARRAY.
        matchers = (
            (VAR_DEF_PATTERN, 'variable',
             lambda m: _remove_quotes(m['val'])),
            (EMPTY_VAR_DEF_PATTERN, 'variable',
             lambda m: None),
            (SIMPLE_ARRAY_DEF_PATTERN, 'array',
             lambda m: _get_simple_array_vals(m['body'])),
            (ASSOCIATIVE_ARRAY_DEF_PATTERN, 'associative',
             lambda m: _get_associative_array_vals(m['body'])),
            (EMPTY_ARRAY_DEF_PATTERN, 'array',
             lambda m: []),
        )

        for line in filter(None, data.splitlines()):
            for pattern, type_hint, extract_value in matchers:
                match = re.search(pattern, line)
                if match:
                    entry: Dict = {
                        'name': match['name'],
                        'value': extract_value(match)
                    }
                    # adds type/readonly/integer/lowercase/uppercase/exported
                    entry.update(_get_declare_options(line, type_hint))
                    raw_output.append(entry)
                    break

    return raw_output if raw else _process(raw_output)

View File

@@ -1,11 +1,12 @@
import setuptools
with open('README.md', 'r') as f:
long_description = f.read()
setuptools.setup(
name='jc',
version='1.25.6',
version='1.25.7',
author='Kelly Brazil',
author_email='kellyjonbrazil@gmail.com',
description='Converts the output of popular command-line tools and file-types to JSON.',

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
[{"chain":"INPUT","default_policy":"ACCEPT","default_packets":0,"default_bytes":0,"rules":[{"pkts":17,"bytes":1172,"target":null,"prot":"all","opt":null,"in":"*","out":"*","source":"0.0.0.0/0","destination":"0.0.0.0/0"}]}]

View File

@@ -0,0 +1,3 @@
Chain INPUT (policy ACCEPT 0 packets, 0 bytes)
pkts bytes target prot opt in out source destination
17 1172 all -- * * 0.0.0.0/0 0.0.0.0/0

View File

@@ -0,0 +1 @@
[{"total_files":23784,"regular_files":23191,"dir_files":593,"total_created_files":2651,"created_regular_files":2611,"created_dir_files":40,"deleted_files":0,"transferred_files":2629,"transferred_file_size":6880000000000,"literal_data":0,"matched_data":0,"file_list_size":98100,"file_list_generation_time":0.001,"file_list_transfer_time":0.0,"type":"summary","sent":8990,"received":1290000,"bytes_sec":370210.0,"total_size":6880000000000,"speedup":5311650.06}]

View File

@@ -0,0 +1 @@
[{"summary":{"total_files":23784,"regular_files":23191,"dir_files":593,"total_created_files":2651,"created_regular_files":2611,"created_dir_files":40,"deleted_files":0,"transferred_files":2629,"transferred_file_size":6880000000000,"literal_data":0,"matched_data":0,"file_list_size":98100,"file_list_generation_time":0.001,"file_list_transfer_time":0.0,"sent":8990,"received":1290000,"bytes_sec":370210.0,"total_size":6880000000000,"speedup":5311650.06},"files":[]}]

View File

@@ -0,0 +1,56 @@
rsync[1817530] (server sender) heap statistics:
arena: 1204224 (bytes from sbrk)
ordblks: 46 (chunks not in use)
rsync[1007426] (receiver) heap statistics:
arena: 9244672 (bytes from sbrk)
ordblks: 57 (chunks not in use)
smblks: 1 (free fastbin blocks)
hblks: 1 (chunks from mmap)
hblkhd: 266240 (bytes from mmap)
allmem: 9510912 (bytes from sbrk + mmap)
usmblks: 0 (always 0)
fsmblks: 96 (bytes in freed fastbin blocks)
uordblks: 486480 (bytes used)
fordblks: 8758192 (bytes free)
keepcost: 133856 (bytes in releasable chunk)
smblks: 2 (free fastbin blocks)
hblks: 1 (chunks from mmap)
hblkhd: 266240 (bytes from mmap)
allmem: 1470464 (bytes from sbrk + mmap)
usmblks: 0 (always 0)
fsmblks: 192 (bytes in freed fastbin blocks)
uordblks: 478288 (bytes used)
fordblks: 725936 (bytes free)
keepcost: 427216 (bytes in releasable chunk)
rsync[1007424] (generator) heap statistics:
arena: 1384448 (bytes from sbrk)
ordblks: 6 (chunks not in use)
smblks: 1 (free fastbin blocks)
hblks: 1 (chunks from mmap)
hblkhd: 266240 (bytes from mmap)
allmem: 1650688 (bytes from sbrk + mmap)
usmblks: 0 (always 0)
fsmblks: 96 (bytes in freed fastbin blocks)
uordblks: 486160 (bytes used)
fordblks: 898288 (bytes free)
keepcost: 132272 (bytes in releasable chunk)
Number of files: 23,784 (reg: 23,191, dir: 593)
Number of created files: 2,651 (reg: 2,611, dir: 40)
Number of deleted files: 0
Number of regular files transferred: 2,629
Total file size: 6.88T bytes
Total transferred file size: 759.17G bytes
Literal data: 0 bytes
Matched data: 0 bytes
File list size: 98.10K
File list generation time: 0.001 seconds
File list transfer time: 0.000 seconds
Total bytes sent: 8.99K
Total bytes received: 1.29M
sent 8.99K bytes received 1.29M bytes 370.21K bytes/sec
total size is 6.88T speedup is 5,311,650.06 (DRY RUN)

View File

@@ -0,0 +1 @@
[{"name":"BASH_ARGC","value":[],"type":"array","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"BASH_ARGV","value":[],"type":"array","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"BASH_LINENO","value":[],"type":"array","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"BASH_SOURCE","value":[],"type":"array","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"BASH_VERSINFO","value":["5","3","9","1","release","aarch64-apple-darwin24.6.0"],"type":"array","readonly":true,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"DIRSTACK","value":[],"type":"array","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"FUNCNAME","value":null,"type":"array","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"GROUPS","value":[],"type":"array","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"PIPESTATUS","value":["0"],"type":"array","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"integers_array","value":[1,100,999],"type":"array","readonly":false,"integer":true,"lowercase":false,"uppercase":false,"exported":false},{"name":"simple_array","value":["hello \"world\"","abc","1 2 3"],"type":"array","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"uppercase_array","value":["ABC","123","XYZ"],"type":"array","readonly":true,"integer":false,"lowercase":false,"uppercase":true,"exported":false}]

12
tests/fixtures/generic/typeset--a.out vendored Normal file
View File

@@ -0,0 +1,12 @@
declare -a BASH_ARGC=()
declare -a BASH_ARGV=()
declare -a BASH_LINENO=()
declare -a BASH_SOURCE=()
declare -ar BASH_VERSINFO=([0]="5" [1]="3" [2]="9" [3]="1" [4]="release" [5]="aarch64-apple-darwin24.6.0")
declare -a DIRSTACK=()
declare -a FUNCNAME
declare -a GROUPS=()
declare -a PIPESTATUS=([0]="0")
declare -ai integers_array=([0]="1" [1]="100" [2]="999")
declare -a simple_array=([0]="hello \"world\"" [1]="abc" [2]="1 2 3")
declare -aru uppercase_array=([0]="ABC" [1]="123" [2]="XYZ")

View File

@@ -0,0 +1 @@
[{"name":"BASH_ALIASES","value":[],"type":"associative","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"BASH_CMDS","value":[],"type":"associative","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"associative_array","value":{"key2":"abc","key3":"1 2 3","key1":"hello \"world\""},"type":"associative","readonly":false,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"integers_associative_array","value":{"one":1,"two":500,"three":999},"type":"associative","readonly":false,"integer":true,"lowercase":false,"uppercase":false,"exported":false}]

View File

@@ -0,0 +1,4 @@
declare -A BASH_ALIASES=()
declare -A BASH_CMDS=()
declare -A associative_array=([key2]="abc" [key3]="1 2 3" [key1]="hello \"world\"" )
declare -Ai integers_associative_array=([one]="1" [two]="500" [three]="999" )

File diff suppressed because one or more lines are too long

32
tests/fixtures/generic/typeset--p.out vendored Normal file
View File

@@ -0,0 +1,32 @@
declare -- BASH="/opt/homebrew/bin/bash"
declare -r BASHOPTS="checkwinsize:cmdhist:complete_fullquote:expand_aliases:extquote:force_fignore:globasciiranges:globskipdots:hostcomplete:interactive_comments:patsub_replacement:progcomp:promptvars:sourcepath"
declare -i BASHPID
declare -A BASH_ALIASES=()
declare -a BASH_ARGC=()
declare -- BASH_ARGV0
declare -- BASH_LOADABLES_PATH="/opt/homebrew/lib/bash:/usr/local/lib/bash:/usr/lib/bash:/opt/local/lib/bash:/usr/pkg/lib/bash:/opt/pkg/lib/bash:."
declare -ar BASH_VERSINFO=([0]="5" [1]="3" [2]="9" [3]="1" [4]="release" [5]="aarch64-apple-darwin24.6.0")
declare -- COLUMNS="92"
declare -ir EUID="501"
declare -a FUNCNAME
declare -i HISTCMD
declare -x HOME="/Users/kbrazil"
declare -- IFS=$' \t\n'
declare -x JC_COLORS="cyan,default,default,default"
declare -x OLDPWD
declare -a PIPESTATUS=([0]="0")
declare -ir PPID="50074"
declare -- PS1="\\s-\\v\\\$ "
declare -- PS2="> "
declare -- PS4="+ "
declare -i RANDOM
declare -r SHELLOPTS="braceexpand:emacs:hashall:histexpand:history:interactive-comments:monitor"
declare -- _="-p"
declare -x __CFBundleIdentifier="com.apple.Terminal"
declare -ai integers_array=([0]="1" [1]="100" [2]="999")
declare -a simple_array=([0]="hello \"world\"" [1]="abc" [2]="1 2 3")
declare -r readonly_var="hello"
declare -aru uppercase_array=([0]="ABC" [1]="123" [2]="XYZ")
declare -a num_string_array=([0]="1" [1]="2" [2]="3")
declare -A associative_array=([key2]="abc" [key3]="1 2 3" [key1]="hello \"world\"" )
declare -Ai integers_associative_array=([one]="1" [two]="500" [three]="999" )

View File

@@ -0,0 +1 @@
[{"name":"BASHOPTS","value":"checkwinsize:cmdhist:complete_fullquote:expand_aliases:extquote:force_fignore:globasciiranges:globskipdots:hostcomplete:interactive_comments:patsub_replacement:progcomp:promptvars:sourcepath","type":"variable","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"BASH_ALIASES","value":[],"type":"array","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"BASH_VERSINFO","value":["5","3","9","1","release","aarch64-apple-darwin24.6.0"],"type":"array","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"BASH_VERSION","value":"'5.3.9(1)-release'","type":"variable","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"IFS","value":"$' \\t\\n'","type":"variable","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"INFOPATH","value":"/opt/homebrew/share/info:","type":"variable","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"JC_COLORS","value":"cyan,default,default,default","type":"variable","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"PS1","value":"'\\s-\\v\\$ '","type":"variable","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"PS2","value":"'> '","type":"variable","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"PS4","value":"'+ 
'","type":"variable","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"TERM_PROGRAM_VERSION","value":"455.1","type":"variable","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"TERM_SESSION_ID","value":"E5896C5D-9C9A-4178-9246-00158A3F832F","type":"variable","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"XPC_FLAGS","value":"0x0","type":"variable","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"associative_array","value":{"key2":"abc","key3":"1 2 3","key1":"hello \"world\""},"type":"associative","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"integers_array","value":["1","100","999"],"type":"array","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"simple_array","value":["hello \"world\"","abc","1 2 3"],"type":"array","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null},{"name":"uppercase_array","value":["ABC","123","XYZ"],"type":"array","readonly":null,"integer":null,"lowercase":null,"uppercase":null,"exported":null}]

View File

@@ -0,0 +1,19 @@
BASHOPTS=checkwinsize:cmdhist:complete_fullquote:expand_aliases:extquote:force_fignore:globasciiranges:globskipdots:hostcomplete:interactive_comments:patsub_replacement:progcomp:promptvars:sourcepath
BASH_ALIASES=()
BASH_VERSINFO=([0]="5" [1]="3" [2]="9" [3]="1" [4]="release" [5]="aarch64-apple-darwin24.6.0")
BASH_VERSION='5.3.9(1)-release'
COLUMNS=92
IFS=$' \t\n'
INFOPATH=/opt/homebrew/share/info:
JC_COLORS=cyan,default,default,default
PS1='\s-\v\$ '
PS2='> '
PS4='+ '
TERM_PROGRAM_VERSION=455.1
TERM_SESSION_ID=E5896C5D-9C9A-4178-9246-00158A3F832F
XPC_FLAGS=0x0
_=-a
associative_array=([key2]="abc" [key3]="1 2 3" [key1]="hello \"world\"" )
integers_array=([0]="1" [1]="100" [2]="999")
simple_array=([0]="hello \"world\"" [1]="abc" [2]="1 2 3")
uppercase_array=([0]="ABC" [1]="123" [2]="XYZ")

View File

@@ -0,0 +1 @@
[{"name":"BASHOPTS","value":"checkwinsize:cmdhist:complete_fullquote:expand_aliases:extquote:force_fignore:globasciiranges:globskipdots:hostcomplete:interactive_comments:patsub_replacement:progcomp:promptvars:sourcepath","type":"variable","readonly":true,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"BASH_VERSINFO","value":["5","3","9","1","release","aarch64-apple-darwin24.6.0"],"type":"array","readonly":true,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"EUID","value":501,"type":"variable","readonly":true,"integer":true,"lowercase":false,"uppercase":false,"exported":false},{"name":"PPID","value":50074,"type":"variable","readonly":true,"integer":true,"lowercase":false,"uppercase":false,"exported":false},{"name":"SHELLOPTS","value":"braceexpand:emacs:hashall:histexpand:history:interactive-comments:monitor","type":"variable","readonly":true,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"UID","value":501,"type":"variable","readonly":true,"integer":true,"lowercase":false,"uppercase":false,"exported":false},{"name":"readonly_var","value":"hello","type":"variable","readonly":true,"integer":false,"lowercase":false,"uppercase":false,"exported":false},{"name":"uppercase_array","value":["ABC","123","XYZ"],"type":"array","readonly":true,"integer":false,"lowercase":false,"uppercase":true,"exported":false}]

8
tests/fixtures/generic/typeset--r.out vendored Normal file
View File

@@ -0,0 +1,8 @@
declare -r BASHOPTS="checkwinsize:cmdhist:complete_fullquote:expand_aliases:extquote:force_fignore:globasciiranges:globskipdots:hostcomplete:interactive_comments:patsub_replacement:progcomp:promptvars:sourcepath"
declare -ar BASH_VERSINFO=([0]="5" [1]="3" [2]="9" [3]="1" [4]="release" [5]="aarch64-apple-darwin24.6.0")
declare -ir EUID="501"
declare -ir PPID="50074"
declare -r SHELLOPTS="braceexpand:emacs:hashall:histexpand:history:interactive-comments:monitor"
declare -ir UID="501"
declare -r readonly_var="hello"
declare -aru uppercase_array=([0]="ABC" [1]="123" [2]="XYZ")

View File

@@ -0,0 +1,46 @@
55a9e753c000-55a9e7570000 r--p 00000000 fd:00 798126 /usr/lib/systemd/systemd
Size: 208 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Rss: 208 kB
Pss: 104 kB
Shared_Clean: 208 kB
Shared_Dirty: 0 kB
Private_Clean: 0 kB
Private_Dirty: 0 kB
Referenced: 208 kB
Anonymous: 0 kB
LazyFree: 0 kB
AnonHugePages: 0 kB
ShmemPmdMapped: 0 kB
FilePmdMapped: 0 kB
Shared_Hugetlb: 0 kB
Private_Hugetlb: 0 kB
Swap: 0 kB
SwapPss: 0 kB
Locked: 0 kB
THPeligible: 0
VmFlags: rd mr mw me dw sd zz
55a9e7570000-55a9e763a000 r-xp 00034000 fd:00 798126 /usr/lib/systemd/systemd
Size: 808 kB
KernelPageSize: 4 kB
MMUPageSize: 4 kB
Rss: 800 kB
Pss: 378 kB
Shared_Clean: 800 kB
Shared_Dirty: 0 kB
Private_Clean: 0 kB
Private_Dirty: 0 kB
Referenced: 800 kB
Anonymous: 0 kB
LazyFree: 0 kB
AnonHugePages: 0 kB
ShmemPmdMapped: 0 kB
FilePmdMapped: 0 kB
Shared_Hugetlb: 0 kB
Private_Hugetlb: 0 kB
Swap: 0 kB
SwapPss: 0 kB
Locked: 0 kB
THPeligible: 0
VmFlags: rd ex mr mw me dw sd yy

View File

@@ -0,0 +1 @@
[{"start":"55a9e753c000","end":"55a9e7570000","perms":["read","private"],"offset":"00000000","maj":"fd","min":"00","inode":798126,"pathname":"/usr/lib/systemd/systemd","Size":208,"KernelPageSize":4,"MMUPageSize":4,"Rss":208,"Pss":104,"Shared_Clean":208,"Shared_Dirty":0,"Private_Clean":0,"Private_Dirty":0,"Referenced":208,"Anonymous":0,"LazyFree":0,"AnonHugePages":0,"ShmemPmdMapped":0,"FilePmdMapped":0,"Shared_Hugetlb":0,"Private_Hugetlb":0,"Swap":0,"SwapPss":0,"Locked":0,"THPeligible":0,"VmFlags":["rd","mr","mw","me","dw","sd","zz"],"VmFlags_pretty":["readable","may read","may write","may execute","disabled write to the mapped file","soft-dirty flag","zz"]},{"start":"55a9e7570000","end":"55a9e763a000","perms":["read","execute","private"],"offset":"00034000","maj":"fd","min":"00","inode":798126,"pathname":"/usr/lib/systemd/systemd","Size":808,"KernelPageSize":4,"MMUPageSize":4,"Rss":800,"Pss":378,"Shared_Clean":800,"Shared_Dirty":0,"Private_Clean":0,"Private_Dirty":0,"Referenced":800,"Anonymous":0,"LazyFree":0,"AnonHugePages":0,"ShmemPmdMapped":0,"FilePmdMapped":0,"Shared_Hugetlb":0,"Private_Hugetlb":0,"Swap":0,"SwapPss":0,"Locked":0,"THPeligible":0,"VmFlags":["rd","ex","mr","mw","me","dw","sd","yy"],"VmFlags_pretty":["readable","executable","may read","may write","may execute","disabled write to the mapped file","soft-dirty flag","yy"]}]

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

File diff suppressed because one or more lines are too long

View File

@@ -0,0 +1 @@
[{"filename": "out.txt", "mode": "?", "hash": "5a3c9b9e4594dd4a8a5e963a6e917deb844458e6"}]

View File

@@ -0,0 +1 @@
[{"filename": "out.txt", "mode": "?", "hash": "5a3c9b9e4594dd4a8a5e963a6e917deb844458e6"}]

View File

@@ -0,0 +1 @@
5a3c9b9e4594dd4a8a5e963a6e917deb844458e6 ?out.txt

View File

@@ -0,0 +1 @@
[{"filename": "openssl-3.6.0.tar.gz", "mode": "*", "hash": "b6a5f44b7eb69e3fa35dbf15524405b44837a481d43d81daddde3ff21fcbb8e9"}]

View File

@@ -0,0 +1 @@
[{"filename": "openssl-3.6.0.tar.gz", "mode": "binary", "hash": "b6a5f44b7eb69e3fa35dbf15524405b44837a481d43d81daddde3ff21fcbb8e9"}]

View File

@@ -0,0 +1 @@
b6a5f44b7eb69e3fa35dbf15524405b44837a481d43d81daddde3ff21fcbb8e9 *openssl-3.6.0.tar.gz

View File

@@ -0,0 +1 @@
[{"filename": "out.txt", "mode": "U", "hash": "6fe4d572948d4c132d1b1b0ab91e89de4be01efd"}, {"filename": "out.txt", "mode": "^", "hash": "68382a729a930a2219f0bd10c5c4d61eec856a96"}]

View File

@@ -0,0 +1 @@
[{"filename": "out.txt", "mode": "universal", "hash": "6fe4d572948d4c132d1b1b0ab91e89de4be01efd"}, {"filename": "out.txt", "mode": "bits", "hash": "68382a729a930a2219f0bd10c5c4d61eec856a96"}]

View File

@@ -0,0 +1,2 @@
6fe4d572948d4c132d1b1b0ab91e89de4be01efd Uout.txt
68382a729a930a2219f0bd10c5c4d61eec856a96 ^out.txt

View File

@@ -95,6 +95,25 @@ class MyTests(unittest.TestCase):
self.assertEqual(jc.parsers.dir.parse(self.windows_10_dir_S, quiet=True),
self.windows_10_dir_S_json)
def test_dir_drive_letter_d(self):
    """
    Verify the 'D:' drive letter survives parsing of the parent path.

    Regression: ``lstrip(" Directory of ")`` removes any character in the
    set {' ','D','i','r','e','c','t','o','y','f'}, so the leading 'D' of
    'D:\\' used to be stripped.
    """
    sample_lines = [
        ' Volume in drive D has no label.\r\n',
        ' Volume Serial Number is 1234-5678\r\n',
        '\r\n',
        ' Directory of D:\\Users\\testuser\r\n',
        '\r\n',
        '03/24/2021 03:15 PM <DIR> .\r\n',
        '03/24/2021 03:15 PM <DIR> ..\r\n',
    ]
    parsed = jc.parsers.dir.parse(''.join(sample_lines), quiet=True)
    self.assertEqual(parsed[0]['parent'], 'D:\\Users\\testuser')
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()

View File

@@ -18,6 +18,15 @@ class MyTests(unittest.TestCase):
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/sha384sum.out'), 'r', encoding='utf-8') as f:
centos_7_7_sha384sum = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/shasum-portable.out'), 'r', encoding='utf-8') as f:
ubuntu_18_04_shasum_portable = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-24.04/sha256sum-binary.out'), 'r', encoding='utf-8') as f:
ubuntu_24_04_sha256sum_binary = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-24.04/shasum-universal-bits.out'), 'r', encoding='utf-8') as f:
ubuntu_24_04_shasum_universal_bits = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/md5.out'), 'r', encoding='utf-8') as f:
osx_10_14_6_md5 = f.read()
@@ -28,18 +37,51 @@ class MyTests(unittest.TestCase):
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/md5sum.json'), 'r', encoding='utf-8') as f:
centos_7_7_md5sum_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/md5sum-raw.json'), 'r', encoding='utf-8') as f:
centos_7_7_md5sum_raw_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/sha256sum.json'), 'r', encoding='utf-8') as f:
centos_7_7_sha256sum_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/sha256sum-raw.json'), 'r', encoding='utf-8') as f:
centos_7_7_sha256sum_raw_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/sha384sum.json'), 'r', encoding='utf-8') as f:
centos_7_7_sha384sum_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/sha384sum-raw.json'), 'r', encoding='utf-8') as f:
centos_7_7_sha384sum_raw_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/shasum-portable.json'), 'r', encoding='utf-8') as f:
ubuntu_18_04_shasum_portable_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-18.04/shasum-portable-raw.json'), 'r', encoding='utf-8') as f:
ubuntu_18_04_shasum_portable_raw_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-24.04/sha256sum-binary.json'), 'r', encoding='utf-8') as f:
ubuntu_24_04_sha256sum_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-24.04/sha256sum-binary-raw.json'), 'r', encoding='utf-8') as f:
ubuntu_24_04_sha256sum_raw_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-24.04/shasum-universal-bits.json'), 'r', encoding='utf-8') as f:
ubuntu_24_04_shasum_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/ubuntu-24.04/shasum-universal-bits-raw.json'), 'r', encoding='utf-8') as f:
ubuntu_24_04_shasum_raw_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/md5.json'), 'r', encoding='utf-8') as f:
osx_10_14_6_md5_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/md5-raw.json'), 'r', encoding='utf-8') as f:
osx_10_14_6_md5_raw_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/shasum.json'), 'r', encoding='utf-8') as f:
osx_10_14_6_shasum_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/shasum-raw.json'), 'r', encoding='utf-8') as f:
osx_10_14_6_shasum_raw_json = json.loads(f.read())
def test_hashsum_nodata(self):
"""
@@ -53,30 +95,113 @@ class MyTests(unittest.TestCase):
"""
self.assertEqual(jc.parsers.hashsum.parse(self.centos_7_7_md5sum, quiet=True), self.centos_7_7_md5sum_json)
def test_md5sum_centos_7_7_raw(self):
    """Test 'md5sum' on Centos 7.7, raw output"""
    parsed = jc.parsers.hashsum.parse(self.centos_7_7_md5sum, quiet=True, raw=True)
    self.assertEqual(parsed, self.centos_7_7_md5sum_raw_json)
def test_sha256sum_centos_7_7(self):
    """Test 'sha256sum' on Centos 7.7"""
    parsed = jc.parsers.hashsum.parse(self.centos_7_7_sha256sum, quiet=True)
    self.assertEqual(parsed, self.centos_7_7_sha256sum_json)
def test_sha256sum_centos_7_7_raw(self):
    """Test 'sha256sum' on Centos 7.7, raw output"""
    parsed = jc.parsers.hashsum.parse(self.centos_7_7_sha256sum, quiet=True, raw=True)
    self.assertEqual(parsed, self.centos_7_7_sha256sum_raw_json)
def test_sha384sum_centos_7_7(self):
    """Test 'sha384sum' on Centos 7.7"""
    parsed = jc.parsers.hashsum.parse(self.centos_7_7_sha384sum, quiet=True)
    self.assertEqual(parsed, self.centos_7_7_sha384sum_json)
def test_sha384sum_centos_7_7_raw(self):
    """Test 'sha384sum' on Centos 7.7, raw output"""
    parsed = jc.parsers.hashsum.parse(self.centos_7_7_sha384sum, quiet=True, raw=True)
    self.assertEqual(parsed, self.centos_7_7_sha384sum_raw_json)
def test_sha256sum_ubuntu_18_04_unsupported_mode(self):
    """
    Test 'sha256sum' on Ubuntu 18.04, portable mode (no friendly name)
    """
    self.assertEqual(jc.parsers.hashsum.parse(
        self.ubuntu_18_04_shasum_portable, quiet=True),
        self.ubuntu_18_04_shasum_portable_json)
def test_sha256sum_ubuntu_18_04_unsupported_mode_raw(self):
    """
    Test 'sha256sum' on Ubuntu 18.04, portable mode (no friendly name), raw output
    """
    self.assertEqual(jc.parsers.hashsum.parse(
        self.ubuntu_18_04_shasum_portable, quiet=True, raw=True),
        self.ubuntu_18_04_shasum_portable_raw_json)
def test_sha256sum_ubuntu_24_04_binary(self):
    """Test 'sha256sum' on Ubuntu 24.04, binary mode"""
    parsed = jc.parsers.hashsum.parse(self.ubuntu_24_04_sha256sum_binary, quiet=True)
    self.assertEqual(parsed, self.ubuntu_24_04_sha256sum_json)
def test_sha256sum_ubuntu_24_04_binary_raw(self):
    """Test 'sha256sum' on Ubuntu 24.04, binary mode, raw output"""
    parsed = jc.parsers.hashsum.parse(self.ubuntu_24_04_sha256sum_binary, quiet=True, raw=True)
    self.assertEqual(parsed, self.ubuntu_24_04_sha256sum_raw_json)
def test_shasum_ubuntu_24_04_universal_bits(self):
    """Test 'shasum' on Ubuntu 24.04, universal and bits modes"""
    parsed = jc.parsers.hashsum.parse(self.ubuntu_24_04_shasum_universal_bits, quiet=True)
    self.assertEqual(parsed, self.ubuntu_24_04_shasum_json)
def test_shasum_ubuntu_24_04_raw(self):
    """Test 'shasum' on Ubuntu 24.04, universal and bits modes, raw output"""
    parsed = jc.parsers.hashsum.parse(self.ubuntu_24_04_shasum_universal_bits, quiet=True, raw=True)
    self.assertEqual(parsed, self.ubuntu_24_04_shasum_raw_json)
def test_md5_osx_10_14_6(self):
    """Test 'md5' on OSX 10.14.6"""
    parsed = jc.parsers.hashsum.parse(self.osx_10_14_6_md5, quiet=True)
    self.assertEqual(parsed, self.osx_10_14_6_md5_json)
def test_md5_osx_10_14_6_raw(self):
    """Test 'md5' on OSX 10.14.6, raw output"""
    parsed = jc.parsers.hashsum.parse(self.osx_10_14_6_md5, quiet=True, raw=True)
    self.assertEqual(parsed, self.osx_10_14_6_md5_raw_json)
def test_shasum_osx_10_14_6(self):
    """Test 'shasum' on OSX 10.14.6"""
    parsed = jc.parsers.hashsum.parse(self.osx_10_14_6_shasum, quiet=True)
    self.assertEqual(parsed, self.osx_10_14_6_shasum_json)
def test_shasum_osx_10_14_6_raw(self):
    """Test 'shasum' on OSX 10.14.6, raw output"""
    parsed = jc.parsers.hashsum.parse(self.osx_10_14_6_shasum, quiet=True, raw=True)
    self.assertEqual(parsed, self.osx_10_14_6_shasum_raw_json)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()

View File

@@ -148,6 +148,21 @@ class MyTests(unittest.TestCase):
"""
self.assertEqual(jc.parsers.ifconfig.parse(self.osx_freebsd12_ifconfig_extra_fields4, quiet=True), self.freebsd12_ifconfig_extra_fields4_json)
def test_ifconfig_hex_mask_all_zeros(self):
    """
    Verify an all-zero hex netmask (0x00000000, FreeBSD/macOS format)
    is converted to '0.0.0.0'.

    Regression: ``lstrip('0x')`` removes any leading '0'/'x' characters,
    so leading zeros of the hex digits were stripped along with the
    prefix, corrupting all-zero masks.
    """
    sample = ''.join([
        'lo0: flags=8049<UP,LOOPBACK,RUNNING,MULTICAST> mtu 16384\n',
        '\toptions=1203<RXCSUM,TXCSUM,TXSTATUS,SW_TIMESTAMP>\n',
        '\tinet 192.168.1.1 netmask 0x00000000\n',
    ])
    parsed = jc.parsers.ifconfig.parse(sample, quiet=True)
    self.assertEqual(parsed[0]['ipv4_mask'], '0.0.0.0')
    self.assertEqual(parsed[0]['ipv4'][0]['mask'], '0.0.0.0')
def test_ifconfig_utun_ipv4(self):
"""
Test 'ifconfig' with ipv4 utun addresses (macOS)

View File

@@ -48,6 +48,9 @@ class MyTests(unittest.TestCase):
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/iptables-no-jump.out'), 'r', encoding='utf-8') as f:
generic_iptables_no_jump = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/iptables-no-jump2.out'), 'r', encoding='utf-8') as f:
generic_iptables_no_jump2 = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/iptables-filter.json'), 'r', encoding='utf-8') as f:
centos_7_7_iptables_filter_json = json.loads(f.read())
@@ -88,6 +91,9 @@ class MyTests(unittest.TestCase):
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/iptables-no-jump.json'), 'r', encoding='utf-8') as f:
generic_iptables_no_jump_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/iptables-no-jump2.json'), 'r', encoding='utf-8') as f:
generic_iptables_no_jump2_json = json.loads(f.read())
def test_iptables_nodata(self):
"""
@@ -173,6 +179,12 @@ class MyTests(unittest.TestCase):
"""
self.assertEqual(jc.parsers.iptables.parse(self.generic_iptables_no_jump, quiet=True), self.generic_iptables_no_jump_json)
def test_iptables_no_jump2_generic(self):
    """Test 'sudo iptables' with no jump target and verbose output"""
    parsed = jc.parsers.iptables.parse(self.generic_iptables_no_jump2, quiet=True)
    self.assertEqual(parsed, self.generic_iptables_no_jump2_json)
def test_iptables_x_option_format(self):
"""
Test iptables -x

View File

@@ -89,6 +89,58 @@ class MyTests(unittest.TestCase):
"""
self.assertEqual(jc.parsers.pip_show.parse(self.generic_pip_show_multiline_license_first_blank, quiet=True), self.generic_pip_show_multiline_license_first_blank_json)
def test_pip_show_files_section(self):
    """
    Test 'pip show -f' output with a files section
    """
    # Raw 'pip show -f' output; the trailing 'Files:' section lists one
    # installed path per line.
    data = """\
Name: jc
Version: 1.25.4
Summary: Converts the output of popular command-line tools and file-types to JSON.
Home-page: https://github.com/kellyjonbrazil/jc
Author: Kelly Brazil
Author-email: kelly@gmail.com
License: MIT
Location: /home/pi/.local/lib/python3.11/site-packages
Requires: Pygments, ruamel.yaml, xmltodict
Required-by: pypiwifi
Files:
../../../bin/jc
jc-1.25.4.dist-info/RECORD
"""
    # The parser should fold the 'Files:' continuation lines into a
    # 'files' list instead of dropping or mis-attributing them.
    expected = [{
        'name': 'jc',
        'version': '1.25.4',
        'summary': 'Converts the output of popular command-line tools and file-types to JSON.',
        'home_page': 'https://github.com/kellyjonbrazil/jc',
        'author': 'Kelly Brazil',
        'author_email': 'kelly@gmail.com',
        'license': 'MIT',
        'location': '/home/pi/.local/lib/python3.11/site-packages',
        'requires': 'Pygments, ruamel.yaml, xmltodict',
        'required_by': 'pypiwifi',
        'files': ['../../../bin/jc', 'jc-1.25.4.dist-info/RECORD']
    }]
    self.assertEqual(jc.parsers.pip_show.parse(data, quiet=True), expected)
def test_pip_show_files_section_with_following_field(self):
    """
    Test 'pip show -f' output when the files section is followed by a new field
    """
    # A 'Key: value' line after the Files entries must terminate the
    # files list and be parsed as a regular field.
    data = """\
Name: jc
Files:
../../../bin/jc
jc-1.25.4.dist-info/RECORD
Foo: bar
"""
    expected = [{
        'name': 'jc',
        'files': ['../../../bin/jc', 'jc-1.25.4.dist-info/RECORD'],
        'foo': 'bar'
    }]
    self.assertEqual(jc.parsers.pip_show.parse(data, quiet=True), expected)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()

View File

@@ -16,7 +16,10 @@ class MyTests(unittest.TestCase):
fixtures = {
'proc_pid_smaps': (
'fixtures/linux-proc/pid_smaps',
'fixtures/linux-proc/pid_smaps.json')
'fixtures/linux-proc/pid_smaps.json'),
'proc_pid_smaps_unknown_flag': (
'fixtures/linux-proc/pid_smaps_unknown_flag',
'fixtures/linux-proc/pid_smaps_unknown_flag.json')
}
for file, filepaths in fixtures.items():
@@ -39,6 +42,13 @@ class MyTests(unittest.TestCase):
self.assertEqual(jc.parsers.proc_pid_smaps.parse(self.f_in['proc_pid_smaps'], quiet=True),
self.f_json['proc_pid_smaps'])
def test_proc_pid_smaps_unknown_flag(self):
    """Test '/proc/<pid>/smaps' with an unknown flag"""
    parsed = jc.parsers.proc_pid_smaps.parse(
        self.f_in['proc_pid_smaps_unknown_flag'], quiet=True)
    self.assertEqual(parsed, self.f_json['proc_pid_smaps_unknown_flag'])
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()

View File

@@ -45,6 +45,9 @@ class MyTests(unittest.TestCase):
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/rsync-i-vvv-logfile-nochange.out'), 'r', encoding='utf-8') as f:
osx_10_14_6_rsync_i_vvv_logfile_nochange = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/rsync-i-stats.out'), 'r', encoding='utf-8') as f:
generic_rsync_i_stats = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/rsync-i.json'), 'r', encoding='utf-8') as f:
centos_7_7_rsync_i_json = json.loads(f.read())
@@ -82,6 +85,9 @@ class MyTests(unittest.TestCase):
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/rsync-i-vvv-logfile-nochange.json'), 'r', encoding='utf-8') as f:
osx_10_14_6_rsync_i_vvv_logfile_nochange_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/rsync-i-stats.json'), 'r', encoding='utf-8') as f:
generic_rsync_i_stats_json = json.loads(f.read())
def test_rsync_nodata(self):
"""
@@ -173,6 +179,12 @@ total size is 221.79G speedup is 25,388.23
expected = [{"summary":{"sent":8710000,"received":29880,"bytes_sec":10990.0,"total_size":221790000000,"speedup":25388.23},"files":[]}]
self.assertEqual(jc.parsers.rsync.parse(data, quiet=True), expected)
def test_rsync_with_stats(self):
    """Test 'rsync -i --stats' or 'rsync -i --info=stats[1-3]'"""
    parsed = jc.parsers.rsync.parse(self.generic_rsync_i_stats, quiet=True)
    self.assertEqual(parsed, self.generic_rsync_i_stats_json)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()

View File

@@ -49,6 +49,9 @@ class MyTests(unittest.TestCase):
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/rsync-i-vvv-logfile-nochange.out'), 'r', encoding='utf-8') as f:
osx_10_14_6_rsync_i_vvv_logfile_nochange = f.read()
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/rsync-i-stats.out'), 'r', encoding='utf-8') as f:
generic_rsync_i_stats = f.read()
# output
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/centos-7.7/rsync-i-streaming.json'), 'r', encoding='utf-8') as f:
centos_7_7_rsync_i_streaming_json = json.loads(f.read())
@@ -86,6 +89,9 @@ class MyTests(unittest.TestCase):
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/osx-10.14.6/rsync-i-vvv-logfile-nochange-streaming.json'), 'r', encoding='utf-8') as f:
osx_10_14_6_rsync_i_vvv_logfile_nochange_streaming_json = json.loads(f.read())
with open(os.path.join(THIS_DIR, os.pardir, 'tests/fixtures/generic/rsync-i-stats-streaming.json'), 'r', encoding='utf-8') as f:
generic_rsync_i_stats_streaming_json = json.loads(f.read())
def test_rsync_s_nodata(self):
"""
@@ -177,6 +183,11 @@ total size is 221.79G speedup is 25,388.23
expected = [{"type":"summary","sent":8710000,"received":29880,"bytes_sec":10990.0,"total_size":221790000000,"speedup":25388.23}]
self.assertEqual(list(jc.parsers.rsync_s.parse(data.splitlines(), quiet=True)), expected)
def test_rsync_s_i_stats(self):
    """Test 'rsync -i --stats' or 'rsync -i --info=stats[1-3]'"""
    # The streaming parser consumes an iterable of lines and yields results.
    stream = jc.parsers.rsync_s.parse(self.generic_rsync_i_stats.splitlines(), quiet=True)
    self.assertEqual(list(stream), self.generic_rsync_i_stats_streaming_json)
if __name__ == '__main__':

31
tests/test_typeset.py Normal file
View File

@@ -0,0 +1,31 @@
import unittest
import os
import sys
sys.path.append(os.getcwd())
from tests import utils_for_test as test_utils
sys.path.pop()
# Execute these steps for standard tests:
# - Save this file as `test_{parser_name}.py` since the helper methods extract parser names from the filename.
# - Organize fixtures in `tests/fixtures` for optimal structure.
# - Format fixtures as follows (using double dashes):
# - `{parser_name}--{some_test_description}.out` for command output.
# - `{parser_name}--{some_test_description}.json` for expected JSON after parsing.
class MyTests(unittest.TestCase):
    """Standard fixture-driven tests for the 'typeset' parser."""

    def test_foo_nodata(self):
        """Test 'foo' with no data"""
        # Empty input should produce the empty-result sentinel ([]).
        test_utils.run_no_data(self, __file__, [])

    def test_foo_all_fixtures(self):
        """Test 'foo' with various fixtures"""
        # Runs every tests/fixtures/*/typeset--*.out against its .json twin.
        test_utils.run_all_fixtures(self, __file__)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()