Merge pull request #3521 from Textualize/splitcells-fix

Segment.split_cells fix
This commit is contained in:
Will McGugan 2024-10-04 12:49:13 +01:00 committed by GitHub
commit 0f2f51b872
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 41 additions and 26 deletions

View File

@@ -5,11 +5,12 @@ All notable changes to this project will be documented in this file.
The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/),
and this project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
## Unreleased
## [13.9.2] - 2024-10-04
### Fixed
- Fixed `Table` columns not highlighting when added by `add_row` https://github.com/Textualize/rich/issues/3517
- Fixed an issue with Segment.split_cells reported in Textual https://github.com/Textualize/textual/issues/5090
## [13.9.1] - 2024-10-01
@@ -2096,6 +2097,7 @@ Major version bump for a breaking change to `Text.stylize signature`, which corr
- First official release, API still to be stabilized
[13.9.2]: https://github.com/textualize/rich/compare/v13.9.1...v13.9.2
[13.9.1]: https://github.com/textualize/rich/compare/v13.9.0...v13.9.1
[13.9.0]: https://github.com/textualize/rich/compare/v13.8.1...v13.9.0
[13.8.1]: https://github.com/textualize/rich/compare/v13.8.0...v13.8.1

View File

@@ -2,7 +2,7 @@
name = "rich"
homepage = "https://github.com/Textualize/rich"
documentation = "https://rich.readthedocs.io/en/latest/"
version = "13.9.1"
version = "13.9.2"
description = "Render rich text, tables, progress bars, syntax highlighting, markdown and more to the terminal"
authors = ["Will McGugan <willmcgugan@gmail.com>"]
license = "MIT"

View File

@@ -129,34 +129,31 @@ class Segment(NamedTuple):
cell_size = get_character_cell_size
pos = int((cut / cell_length) * (len(text))) - 1
if pos < 0:
pos = 0
pos = int((cut / cell_length) * len(text))
before = text[:pos]
cell_pos = cell_len(before)
if cell_pos == cut:
return (
_Segment(before, style, control),
_Segment(text[pos:], style, control),
)
while pos < len(text):
char = text[pos]
pos += 1
cell_pos += cell_size(char)
while True:
before = text[:pos]
if cell_pos == cut:
cell_pos = cell_len(before)
out_by = cell_pos - cut
if not out_by:
return (
_Segment(before, style, control),
_Segment(text[pos:], style, control),
)
if cell_pos > cut:
if out_by == -1 and cell_size(text[pos]) == 2:
return (
_Segment(before[: pos - 1] + " ", style, control),
_Segment(text[:pos] + " ", style, control),
_Segment(" " + text[pos + 1 :], style, control),
)
if out_by == +1 and cell_size(text[pos - 1]) == 2:
return (
_Segment(text[: pos - 1] + " ", style, control),
_Segment(" " + text[pos:], style, control),
)
raise AssertionError("Will never reach here")
if cell_pos < cut:
pos += 1
else:
pos -= 1
def split_cells(self, cut: int) -> Tuple["Segment", "Segment"]:
"""Split segment in to two segments at the specified column.

View File

@@ -285,14 +285,30 @@ def test_split_cells_emoji(text, split, result):
assert Segment(text).split_cells(split) == result
def test_split_cells_mixed() -> None:
@pytest.mark.parametrize(
"segment",
[
Segment("早乙女リリエル (CV: 徳井青)"),
Segment("メイド・イン・きゅんクチュアリ☆ "),
Segment("TVアニメ「メルクストーリア -無気力少年と瓶の中の少女-」 主題歌CD"),
Segment("南無阿弥JKうらめしや? "),
Segment("メルク (CV: 水瀬いのり) "),
Segment(" メルク (CV: 水瀬いのり) "),
Segment(" メルク (CV: 水瀬いのり) "),
Segment(" メルク (CV: 水瀬いのり) "),
],
)
def test_split_cells_mixed(segment: Segment) -> None:
"""Check that split cells splits on cell positions."""
# Caused https://github.com/Textualize/textual/issues/4996 in Textual
test = Segment("早乙女リリエル (CV: 徳井青)")
for position in range(1, test.cell_length):
left, right = Segment.split_cells(test, position)
for position in range(0, segment.cell_length + 1):
left, right = Segment.split_cells(segment, position)
assert all(
cell_len(c) > 0 for c in segment.text
) # Sanity check there aren't any sneaky control codes
assert cell_len(left.text) == position
assert cell_len(right.text) == test.cell_length - position
assert cell_len(right.text) == segment.cell_length - position
def test_split_cells_doubles() -> None: