Merge pull request #26 from alexwlchan/more-compact-encoding
- ID
49b72b2- date
2025-01-15 06:50:34+00:00- author
Alex Chan <alex@alexwlchan.net>- parents
88561f5,8bcdaa9- message
Merge pull request #26 from alexwlchan/more-compact-encoding Add tests for the more compact encoding- changed files
2 files, 70 additions
Changed files
tests/test_encoder.py (1698) → tests/test_encoder.py (2541)
diff --git a/tests/test_encoder.py b/tests/test_encoder.py
index de2951b..16163ca 100644
--- a/tests/test_encoder.py
+++ b/tests/test_encoder.py
@@ -2,6 +2,8 @@
Tests for ``javascript_data_files.encoder``.
"""
+import string
+
from javascript_data_files.encoder import encode_as_json
@@ -44,3 +46,28 @@ def test_a_list_of_long_ints_is_indented_and_split() -> None:
"\n 90,\n 91,\n 92,\n 93,\n 94,\n 95,\n 96,\n 97,\n 98,\n 99"
"\n]"
)
+
+
+def test_a_list_of_strings_is_not_split_over_multiple_lines() -> None:
+ """
+ If there's a list of small strings, they're printed on one line
+ rather than across multiple lines.
+ """
+ assert encode_as_json(["a", "b", "c"]) == '["a", "b", "c"]'
+
+
+def test_a_list_of_long_strings_is_indented_and_split() -> None:
+ """
+ If there's a list with more strings than a sensible line length,
+ they're split across multiple lines.
+ """
+ json_string = encode_as_json(list(string.ascii_lowercase))
+
+ assert json_string == (
+ "["
+ '\n "a",\n "b",\n "c",\n "d",\n "e",\n "f",\n "g",\n "h",'
+ '\n "i",\n "j",\n "k",\n "l",\n "m",\n "n",\n "o",\n "p",'
+ '\n "q",\n "r",\n "s",\n "t",\n "u",\n "v",\n "w",\n "x",'
+ '\n "y",\n "z"'
+ "\n]"
+ )
tests/test_javascript_data_files.py (14431) → tests/test_javascript_data_files.py (16002)
diff --git a/tests/test_javascript_data_files.py b/tests/test_javascript_data_files.py
index 75e93f4..6130c33 100644
--- a/tests/test_javascript_data_files.py
+++ b/tests/test_javascript_data_files.py
@@ -340,6 +340,26 @@ class TestAppendToArray:
with pytest.raises(IsADirectoryError):
append_to_js_array(tmp_path, value="alex")
+ def test_indentation_is_consistent(self, tmp_path: pathlib.Path) -> None:
+ """
+ If you append to an array, the file looks as if you'd read and rewritten
+ the whole thing with ``write_js()``.
+ """
+ js_path1 = tmp_path / "data1.js"
+ js_path2 = tmp_path / "data2.js"
+
+        # We use deliberately large values, so they won't be compressed
+ # by the custom encoder.
+ value = ["1" * 10, "2" * 20, "3" * 30]
+ appended_value = ["4" * 40, "5" * 50, "6" * 60]
+
+ write_js(js_path1, varname="numbers", value=value)
+ append_to_js_array(js_path1, value=appended_value)
+
+ write_js(js_path2, varname="numbers", value=value + [appended_value])
+
+ assert js_path1.read_text() == js_path2.read_text()
+
class TestAppendToObject:
"""
@@ -375,6 +395,29 @@ class TestAppendToObject:
"sideLengths": [1, 2, 3, 4, 5],
}
+ def test_indentation_is_consistent(self, tmp_path: pathlib.Path) -> None:
+ """
+ If you append to an object, the file looks as if you'd read and
+ rewritten the whole thing with ``write_js()``.
+ """
+ js_path1 = tmp_path / "data1.js"
+ js_path2 = tmp_path / "data2.js"
+
+ # We pick a deliberately large value, so it won't be compressed
+ # by the custom encoder.
+ value = ["1" * 10, "2" * 20, "3" * 30]
+
+ write_js(js_path1, varname="shape", value={"colour": "red"})
+ append_to_js_object(js_path1, key="sides", value=value)
+
+ write_js(
+ js_path2,
+ varname="shape",
+ value={"colour": "red", "sides": value},
+ )
+
+ assert js_path1.read_text() == js_path2.read_text()
+
def test_error_if_file_doesnt_look_like_object(self, js_path: pathlib.Path) -> None:
"""
Appending to a file which doesn't contain a JSON object throws