refactor: Reorganize redact script and improve code formatting

Paul Gauthier 2025-03-11 19:30:46 -07:00 committed by Paul Gauthier (aider)
parent 533e5ec03f
commit 9513d307a1


@@ -1,12 +1,13 @@
 #!/usr/bin/env python3
+import json
+import os
 import re
 import sys
-import os
-import json
 
 # Speed up factor for the recording
 SPEEDUP = 1.25
 
+
 def process_file(input_path, output_path):
     """
     Process an asciinema cast v2 file to filter out certain sections based on ANSI cursor commands.
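For context on what the docstring refers to: an asciinema cast v2 file is newline-delimited JSON, with a header object on the first line and one [timestamp, event_type, data] array per line after it. A minimal sketch of reading that layout (the sample events below are invented for illustration, not taken from this commit):

import json

# Invented cast v2 sample: header object, then one JSON event per line.
sample = """\
{"version": 2, "width": 80, "height": 24}
[0.10, "o", "hello "]
[0.35, "o", "world\\r\\n"]
"""

lines = sample.splitlines()
header = json.loads(lines[0])                  # metadata: version, terminal size, ...
events = [json.loads(ln) for ln in lines[1:]]  # each event: [time, type, data]

print(header["width"], header["height"])  # 80 24
for timestamp, event_type, data in events:
    print(f"{timestamp:.2f} {event_type} {data!r}")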
@@ -22,13 +23,16 @@ def process_file(input_path, output_path):
     """
     skip_mode = False
     target_pattern = None
-    ansi_pattern = re.compile(r'\u001b\[(\d+);(\d+)H')
+    ansi_pattern = re.compile(r"\u001b\[(\d+);(\d+)H")
     is_first_line = True
     last_timestamp = 0.0
     time_offset = 0.0  # Accumulator for time to subtract
     max_gap = 0.5  # Maximum allowed time gap between events
 
-    with open(input_path, 'r', encoding='utf-8') as infile, open(output_path, 'w', encoding='utf-8') as outfile:
+    with (
+        open(input_path, "r", encoding="utf-8") as infile,
+        open(output_path, "w", encoding="utf-8") as outfile,
+    ):
         for line in infile:
             # Always include the header (first line)
             if is_first_line:
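Two notes on this hunk. The parenthesized multi-manager `with` introduced by the reformatting is official syntax only from Python 3.10 onward, so the script now effectively assumes at least that version. The regex being re-quoted matches ANSI cursor-position escapes of the form ESC[row;colH; a standalone sketch of what it captures (the sample text is invented):

import re

ansi_pattern = re.compile(r"\u001b\[(\d+);(\d+)H")

# Invented sample: a cursor-position escape followed by some Atuin UI text.
text = "\u001b[12;34HAtuin search"
match = ansi_pattern.search(text)
if match:
    row, col = match.group(1), int(match.group(2))
    print(row, col)  # prints: 12 34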
@@ -49,13 +53,13 @@ def process_file(input_path, output_path):
 
                 # If we're not in skip mode, check if we need to enter it
                 if not skip_mode:
-                    if '\u001b[' in text and 'Atuin' in text:
+                    if "\u001b[" in text and "Atuin" in text:
                         match = ansi_pattern.search(text)
                         if match:
                             row = match.group(1)
                             col = int(match.group(2))
                             # Create pattern for the ending sequence
-                            target_pattern = f'\u001b[{row};{col-1}H'
+                            target_pattern = f"\u001b[{row};{col-1}H"
                             skip_mode = True
                             # Start tracking time to subtract
                             skip_start_time = current_timestamp
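The exit marker here is derived from the entry sequence: same row, one column to the left. A small sketch of that derivation with invented coordinates:

import re

ansi_pattern = re.compile(r"\u001b\[(\d+);(\d+)H")

# Invented entry sequence, as if the Atuin overlay were drawn at row 5, column 20.
entry = "\u001b[5;20HAtuin"
match = ansi_pattern.search(entry)
row = match.group(1)
col = int(match.group(2))

# The ending sequence the script will wait for: same row, column shifted left by one.
target_pattern = f"\u001b[{row};{col-1}H"
print(repr(target_pattern))  # '\x1b[5;19H'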
@@ -79,14 +83,14 @@ def process_file(input_path, output_path):
                     last_timestamp = adjusted_timestamp
 
                     # Apply speedup factor to the timestamp
                     record[0] = adjusted_timestamp / SPEEDUP
-                    outfile.write(json.dumps(record) + '\n')
+                    outfile.write(json.dumps(record) + "\n")
                 # If we're in skip mode, check if we should exit it
                 else:
                     if target_pattern in text:
                         skip_mode = False
                         # Calculate how much time to subtract from future timestamps
-                        time_offset += (current_timestamp - skip_start_time)
+                        time_offset += current_timestamp - skip_start_time
 
                         # Add a 0.5 second pause after each skip section
                         last_timestamp += 0.5
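The timestamp handling in this hunk combines three adjustments: subtract the accumulated duration of skipped sections, add a fixed 0.5 s pause after each skip, and divide by the 1.25x speedup. A simplified walk-through of that arithmetic with invented numbers (it ignores the max_gap clamping the script applies elsewhere):

SPEEDUP = 1.25

# Invented: a skipped section ran from t=10.0s to t=13.0s in the original recording.
skip_start_time = 10.0
current_timestamp = 13.0

time_offset = 0.0
time_offset += current_timestamp - skip_start_time  # 3.0s of skipped content

# An event originally at t=14.0s lands at 14.0 - 3.0 = 11.0s once the skip is removed...
adjusted_timestamp = 14.0 - time_offset

# ...plus the 0.5s pause inserted after the skipped section...
adjusted_timestamp += 0.5

# ...and the speedup finally maps it to 11.5 / 1.25 = 9.2s in the output.
print(adjusted_timestamp / SPEEDUP)  # 9.2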
@@ -108,13 +112,14 @@ def process_file(input_path, output_path):
                         last_timestamp = adjusted_timestamp
 
                         # Apply speedup factor to the timestamp
                         record[0] = adjusted_timestamp / SPEEDUP
-                        outfile.write(json.dumps(record) + '\n')
+                        outfile.write(json.dumps(record) + "\n")
                     # Otherwise we're still in skip mode, don't write anything
             except json.JSONDecodeError:
                 # If we can't parse the line as JSON, include it anyway
                 outfile.write(line)
 
+
 if __name__ == "__main__":
     if len(sys.argv) != 3:
         print(f"Usage: {os.path.basename(sys.argv[0])} input_file output_file")