Datasets:
example_name
stringlengths 10
28
| python_file
stringlengths 9
32
| python_code
stringlengths 490
18.2k
| rust_code
stringlengths 0
434
| has_rust
bool 2
classes | category
stringlengths 2
20
| python_lines
int32 13
586
| rust_lines
int32 0
6
| blocking_features
listlengths 0
7
| suspiciousness
float32 0
0.95
| error
stringlengths 33
500
⌀ |
|---|---|---|---|---|---|---|---|---|---|---|
example_abs
|
abs_tool.py
|
#!/usr/bin/env python3
"""Abs Example - Absolute value operations CLI.
Examples:
>>> compute_abs_int(-5)
5
>>> compute_abs_int(5)
5
>>> compute_abs_float(-3.14)
3.14
>>> compute_abs_float(2.71)
2.71
"""
import argparse
def compute_abs_int(x: int) -> int:
    """Return the absolute value of an integer.

    >>> compute_abs_int(-10)
    10
    >>> compute_abs_int(0)
    0
    >>> compute_abs_int(42)
    42
    """
    # Negate only when negative; identity otherwise.
    return -x if x < 0 else x
def compute_abs_float(x: float) -> float:
    """Return the absolute value of a float.

    >>> compute_abs_float(-2.5)
    2.5
    >>> compute_abs_float(0.0)
    0.0
    >>> compute_abs_float(1.5)
    1.5
    """
    # Conditional expression mirrors the sign test of the int variant.
    return -x if x < 0 else x
def main():
    """CLI entry point: dispatch `int`/`float` subcommands to the abs helpers."""
    parser = argparse.ArgumentParser(description="Absolute value tool")
    commands = parser.add_subparsers(dest="cmd", required=True)
    int_cmd = commands.add_parser("int")
    int_cmd.add_argument("x", type=int)
    float_cmd = commands.add_parser("float")
    float_cmd.add_argument("x", type=float)
    ns = parser.parse_args()
    if ns.cmd == "int":
        print(compute_abs_int(ns.x))
    elif ns.cmd == "float":
        print(compute_abs_float(ns.x))
if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_abs/abs_tool.py (1236 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_abs/abs_tool.rs (2536 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_abs/Cargo.toml (1 dependencies)
⏱️ Parse time: 48ms
📊 Throughput: 25.1 KB/s
⏱️ Total time: 48ms
| true
|
abs
| 65
| 6
|
[] | 0
| null |
example_abs
|
test_abs_tool.py
|
"""Tests for abs_tool - EXTREME TDD."""
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "abs_tool.py"
def run(cmd):
    """Invoke abs_tool.py with *cmd* split on whitespace; capture its output."""
    argv = ["python3", str(SCRIPT), *cmd.split()]
    return subprocess.run(argv, capture_output=True, text=True)
def test_int_positive():
    proc = run("int 5")
    assert proc.returncode == 0
    assert proc.stdout.strip() == "5"
def test_int_negative():
    proc = run("int -42")
    assert proc.returncode == 0
    assert proc.stdout.strip() == "42"
def test_float_positive():
    proc = run("float 3.14")
    assert proc.returncode == 0
    assert proc.stdout.strip() == "3.14"
def test_float_negative():
    proc = run("float -2.5")
    assert proc.returncode == 0
    assert proc.stdout.strip() == "2.5"
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_abs/test_abs_tool.py (747 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_abs/test_abs_tool.rs (2077 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_abs/Cargo.toml (2 dependencies)
⏱️ Parse time: 49ms
📊 Throughput: 14.7 KB/s
⏱️ Total time: 49ms
| true
|
abs
| 38
| 6
|
[] | 0
| null |
example_age_calculator
|
age_cli.py
|
#!/usr/bin/env python3
"""Age calculator CLI.
Calculate age and related date information.
"""
import argparse
import sys
from datetime import date, datetime
def parse_date(date_str: str) -> date | None:
"""Parse date string into date object."""
formats = [
"%Y-%m-%d",
"%d/%m/%Y",
"%m/%d/%Y",
"%Y/%m/%d",
"%d-%m-%Y",
]
for fmt in formats:
try:
return datetime.strptime(date_str, fmt).date()
except ValueError:
continue
return None
def calculate_age(birth_date: date, as_of: date | None = None) -> dict:
"""Calculate age in years, months, days."""
if as_of is None:
as_of = date.today()
# Calculate years
years = as_of.year - birth_date.year
# Adjust if birthday hasn't occurred this year
if (as_of.month, as_of.day) < (birth_date.month, birth_date.day):
years -= 1
# Calculate months
months = as_of.month - birth_date.month
if as_of.day < birth_date.day:
months -= 1
if months < 0:
months += 12
# Calculate days
days = as_of.day - birth_date.day
if days < 0:
# Get days in previous month
prev_month = as_of.month - 1 if as_of.month > 1 else 12
prev_year = as_of.year if as_of.month > 1 else as_of.year - 1
days_in_prev = days_in_month(prev_year, prev_month)
days += days_in_prev
return {
"years": years,
"months": months,
"days": days,
}
def days_in_month(year: int, month: int) -> int:
"""Get number of days in a month."""
if month in [1, 3, 5, 7, 8, 10, 12]:
return 31
if month in [4, 6, 9, 11]:
return 30
# February
if is_leap_year(year):
return 29
return 28
def is_leap_year(year: int) -> bool:
"""Check if year is a leap year."""
if year % 400 == 0:
return True
if year % 100 == 0:
return False
if year % 4 == 0:
return True
return False
def total_days(birth_date: date, as_of: date | None = None) -> int:
"""Calculate total days lived."""
if as_of is None:
as_of = date.today()
return (as_of - birth_date).days
def next_birthday(birth_date: date, as_of: date | None = None) -> date:
"""Calculate next birthday."""
if as_of is None:
as_of = date.today()
# Try this year
try:
this_year = date(as_of.year, birth_date.month, birth_date.day)
except ValueError:
# Feb 29 in non-leap year
this_year = date(as_of.year, 3, 1)
if this_year > as_of:
return this_year
# Next year
try:
return date(as_of.year + 1, birth_date.month, birth_date.day)
except ValueError:
return date(as_of.year + 1, 3, 1)
def days_until_birthday(birth_date: date, as_of: date | None = None) -> int:
"""Days until next birthday."""
if as_of is None:
as_of = date.today()
return (next_birthday(birth_date, as_of) - as_of).days
def zodiac_sign(birth_date: date) -> str:
    """Return the Western zodiac sign for *birth_date*.

    The table maps each sign to the (month, day) on which it ENDS, in
    calendar order. The first entry whose end date is on or after the
    birth date is the correct sign.

    Bug fix: the previous implementation returned the *preceding*
    sign for dates falling after their own month's end date (e.g.
    Apr 25 -> "Aries" instead of "Taurus", Jan 25 -> "Capricorn"
    instead of "Aquarius"), because it indexed `signs[idx - 1]` when
    the current entry's sign was the right answer.
    """
    month = birth_date.month
    day = birth_date.day
    # (end_month, end_day, sign): sign runs through end_day of end_month.
    signs = [
        (1, 20, "Capricorn"),
        (2, 19, "Aquarius"),
        (3, 20, "Pisces"),
        (4, 20, "Aries"),
        (5, 21, "Taurus"),
        (6, 21, "Gemini"),
        (7, 22, "Cancer"),
        (8, 23, "Leo"),
        (9, 23, "Virgo"),
        (10, 23, "Libra"),
        (11, 22, "Scorpio"),
        (12, 22, "Sagittarius"),
    ]
    for end_month, end_day, sign in signs:
        if (month, day) <= (end_month, end_day):
            return sign
    # Past Dec 22: Capricorn wraps into the new year.
    return "Capricorn"
def day_of_week(d: date) -> str:
    """English weekday name; Monday-first, matching date.weekday()."""
    names = ("Monday", "Tuesday", "Wednesday", "Thursday",
             "Friday", "Saturday", "Sunday")
    return names[d.weekday()]
def format_age(age: dict) -> str:
    """Render an age dict as e.g. '25 years, 3 months, 10 days'.

    Zero units are omitted, except that the days component is always
    shown when nothing else is (so a newborn formats as '0 days').
    """
    def unit(count: int, word: str) -> str:
        # Pluralize for any count other than exactly 1.
        suffix = "" if count == 1 else "s"
        return f"{count} {word}{suffix}"

    pieces = []
    if age["years"] > 0:
        pieces.append(unit(age["years"], "year"))
    if age["months"] > 0:
        pieces.append(unit(age["months"], "month"))
    if age["days"] > 0 or not pieces:
        pieces.append(unit(age["days"], "day"))
    return ", ".join(pieces)
def main() -> int:
    """Entry point: parse CLI flags, compute, and print the requested view."""
    parser = argparse.ArgumentParser(description="Calculate age and date information")
    parser.add_argument("birthdate", help="Birth date (YYYY-MM-DD)")
    parser.add_argument("--as-of", metavar="DATE", help="Calculate as of date (default: today)")
    parser.add_argument("--days", action="store_true", help="Show total days")
    parser.add_argument("--next", action="store_true", help="Show next birthday")
    parser.add_argument("--zodiac", action="store_true", help="Show zodiac sign")
    parser.add_argument("--json", action="store_true", help="Output as JSON")
    opts = parser.parse_args()

    born = parse_date(opts.birthdate)
    if not born:
        print(f"Invalid date format: {opts.birthdate}", file=sys.stderr)
        return 1
    reference = None
    if opts.as_of:
        reference = parse_date(opts.as_of)
        if not reference:
            print(f"Invalid date format: {opts.as_of}", file=sys.stderr)
            return 1
    age = calculate_age(born, reference)
    if opts.json:
        import json
        payload = {
            "birth_date": str(born),
            "age": age,
            "total_days": total_days(born, reference),
            "next_birthday": str(next_birthday(born, reference)),
            "days_until_birthday": days_until_birthday(born, reference),
            "zodiac": zodiac_sign(born),
            "birth_day": day_of_week(born),
        }
        print(json.dumps(payload, indent=2))
        return 0
    if opts.days:
        print(total_days(born, reference))
        return 0
    if opts.next:
        upcoming = next_birthday(born, reference)
        remaining = days_until_birthday(born, reference)
        print(f"Next birthday: {upcoming} ({remaining} days)")
        return 0
    if opts.zodiac:
        print(zodiac_sign(born))
        return 0
    # No selector flag: print the full report.
    print(f"Birth date: {born} ({day_of_week(born)})")
    print(f"Age: {format_age(age)}")
    print(f"Total days: {total_days(born, reference):,}")
    print(f"Zodiac: {zodiac_sign(born)}")
    upcoming = next_birthday(born, reference)
    remaining = days_until_birthday(born, reference)
    print(f"Next birthday: {upcoming} ({remaining} days away)")
    return 0
if __name__ == "__main__":
    sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_age_calculator/age_cli.py (6645 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_age_calculator/age_cli.rs (14113 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_age_calculator/Cargo.toml (4 dependencies)
⏱️ Parse time: 55ms
📊 Throughput: 117.4 KB/s
⏱️ Total time: 55ms
| true
|
age_calculator
| 241
| 6
|
[
"exception_handling"
] | 0.577
| null |
example_age_calculator
|
test_age_cli.py
|
"""Tests for age_cli.py"""
from datetime import date
from age_cli import (
calculate_age,
day_of_week,
days_in_month,
days_until_birthday,
format_age,
is_leap_year,
next_birthday,
parse_date,
total_days,
zodiac_sign,
)
class TestParseDate:
    """parse_date accepts several formats and rejects garbage."""
    def test_iso_format(self):
        assert parse_date("2000-01-15") == date(2000, 1, 15)
    def test_slash_dmy(self):
        assert parse_date("15/01/2000") == date(2000, 1, 15)
    def test_slash_mdy(self):
        assert parse_date("01/15/2000") == date(2000, 1, 15)
    def test_invalid(self):
        assert parse_date("not a date") is None
        assert parse_date("2000-13-01") is None
class TestIsLeapYear:
    """Gregorian leap-year rule edge cases."""
    def test_leap_year_divisible_by_4(self):
        assert is_leap_year(2020) is True
    def test_not_leap_divisible_by_100(self):
        assert is_leap_year(1900) is False
    def test_leap_divisible_by_400(self):
        assert is_leap_year(2000) is True
    def test_not_leap(self):
        assert is_leap_year(2023) is False
class TestDaysInMonth:
    """Month lengths, including leap February."""
    def test_january(self):
        assert days_in_month(2023, 1) == 31
    def test_april(self):
        assert days_in_month(2023, 4) == 30
    def test_february_normal(self):
        assert days_in_month(2023, 2) == 28
    def test_february_leap(self):
        assert days_in_month(2024, 2) == 29
class TestCalculateAge:
    """Year/month/day breakdown around the birthday boundary."""
    def test_exact_years(self):
        got = calculate_age(date(2000, 6, 15), date(2023, 6, 15))
        assert got["years"] == 23
        assert got["months"] == 0
        assert got["days"] == 0
    def test_before_birthday(self):
        got = calculate_age(date(2000, 6, 15), date(2023, 6, 10))
        assert got["years"] == 22
    def test_after_birthday(self):
        got = calculate_age(date(2000, 6, 15), date(2023, 6, 20))
        assert got["years"] == 23
        assert got["days"] == 5
    def test_months_calculation(self):
        got = calculate_age(date(2000, 1, 15), date(2000, 4, 15))
        assert got["years"] == 0
        assert got["months"] == 3
        assert got["days"] == 0
class TestTotalDays:
    """Elapsed-days arithmetic."""
    def test_simple(self):
        assert total_days(date(2000, 1, 1), date(2000, 1, 11)) == 10
    def test_year(self):
        # 2000 is a leap year, hence 366.
        assert total_days(date(2000, 1, 1), date(2001, 1, 1)) == 366
class TestNextBirthday:
    """next_birthday picks the first strictly future occurrence."""
    def test_this_year(self):
        got = next_birthday(date(2000, 12, 25), date(2023, 6, 1))
        assert got == date(2023, 12, 25)
    def test_next_year(self):
        got = next_birthday(date(2000, 1, 15), date(2023, 6, 1))
        assert got == date(2024, 1, 15)
    def test_today(self):
        # Birthday falls on the reference date itself -> next year.
        got = next_birthday(date(2000, 6, 15), date(2023, 6, 15))
        assert got == date(2024, 6, 15)
class TestDaysUntilBirthday:
    """Countdown to the next birthday."""
    def test_same_month(self):
        assert days_until_birthday(date(2000, 6, 20), date(2023, 6, 15)) == 5
class TestZodiacSign:
    """Spot checks across the zodiac table."""
    def test_aries(self):
        assert zodiac_sign(date(2000, 4, 10)) == "Aries"
    def test_taurus(self):
        assert zodiac_sign(date(2000, 5, 5)) == "Taurus"
    def test_cancer(self):
        assert zodiac_sign(date(2000, 7, 15)) == "Cancer"
    def test_capricorn_december(self):
        assert zodiac_sign(date(2000, 12, 25)) == "Capricorn"
    def test_capricorn_january(self):
        assert zodiac_sign(date(2000, 1, 10)) == "Capricorn"
class TestDayOfWeek:
    """Weekday names from date.weekday()."""
    def test_monday(self):
        assert day_of_week(date(2023, 12, 25)) == "Monday"
    def test_friday(self):
        assert day_of_week(date(2023, 12, 29)) == "Friday"
class TestFormatAge:
    """Human-readable age formatting."""
    def test_full(self):
        text = format_age({"years": 25, "months": 3, "days": 10})
        assert "25 years" in text
        assert "3 months" in text
        assert "10 days" in text
    def test_singular(self):
        text = format_age({"years": 1, "months": 1, "days": 1})
        assert "1 year" in text
        assert "1 month" in text
        assert "1 day" in text
    def test_zero_years(self):
        text = format_age({"years": 0, "months": 6, "days": 15})
        assert "year" not in text
        assert "6 months" in text
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_age_calculator/test_age_cli.py (4878 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_age_calculator/test_age_cli.rs (10133 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_age_calculator/Cargo.toml (1 dependencies)
⏱️ Parse time: 53ms
📊 Throughput: 89.2 KB/s
⏱️ Total time: 53ms
| true
|
age_calculator
| 181
| 6
|
[
"class_definition"
] | 0.612
| null |
example_any_all
|
any_all_tool.py
|
#!/usr/bin/env python3
"""Any All Example - Any/all operations CLI.
Examples:
>>> check_any(0, 0, 1, 0)
True
>>> check_all(1, 1, 1, 1)
True
"""
import argparse
def check_any(a: int, b: int, c: int, d: int) -> bool:
    """Check if any value is truthy (non-zero).

    >>> check_any(0, 0, 0, 0)
    False
    >>> check_any(1, 0, 0, 0)
    True
    >>> check_any(0, 0, 0, 1)
    True
    """
    return any(v != 0 for v in (a, b, c, d))
def check_all(a: int, b: int, c: int, d: int) -> bool:
    """Check if all values are truthy (non-zero).

    >>> check_all(1, 1, 1, 1)
    True
    >>> check_all(1, 1, 1, 0)
    False
    >>> check_all(0, 0, 0, 0)
    False
    """
    return all(v != 0 for v in (a, b, c, d))
def main():
    """CLI entry point: `any`/`all` subcommands over four integers."""
    parser = argparse.ArgumentParser(description="Any/all operations tool")
    commands = parser.add_subparsers(dest="cmd", required=True)
    # Both subcommands take the same four positional ints.
    for name in ("any", "all"):
        sub = commands.add_parser(name)
        for field in ("a", "b", "c", "d"):
            sub.add_argument(field, type=int)
    ns = parser.parse_args()
    if ns.cmd == "any":
        print("true" if check_any(ns.a, ns.b, ns.c, ns.d) else "false")
    elif ns.cmd == "all":
        print("true" if check_all(ns.a, ns.b, ns.c, ns.d) else "false")
if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_any_all/any_all_tool.py (1498 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_any_all/any_all_tool.rs (2238 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_any_all/Cargo.toml (1 dependencies)
⏱️ Parse time: 58ms
📊 Throughput: 24.8 KB/s
⏱️ Total time: 59ms
| true
|
any_all
| 63
| 6
|
[] | 0
| null |
example_any_all
|
test_any_all_tool.py
|
"""Tests for any_all_tool - EXTREME TDD."""
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "any_all_tool.py"
def run(cmd):
    """Invoke any_all_tool.py with *cmd* split on whitespace; capture output."""
    argv = ["python3", str(SCRIPT), *cmd.split()]
    return subprocess.run(argv, capture_output=True, text=True)
def test_any_true():
    proc = run("any 0 0 1 0")
    assert proc.returncode == 0
    assert proc.stdout.strip() == "true"
def test_any_false():
    proc = run("any 0 0 0 0")
    assert proc.returncode == 0
    assert proc.stdout.strip() == "false"
def test_all_true():
    proc = run("all 1 1 1 1")
    assert proc.returncode == 0
    assert proc.stdout.strip() == "true"
def test_all_false():
    proc = run("all 1 0 1 1")
    assert proc.returncode == 0
    assert proc.stdout.strip() == "false"
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_any_all/test_any_all_tool.py (757 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_any_all/test_any_all_tool.rs (2083 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_any_all/Cargo.toml (2 dependencies)
⏱️ Parse time: 52ms
📊 Throughput: 14.1 KB/s
⏱️ Total time: 52ms
| true
|
any_all
| 38
| 6
|
[] | 0
| null |
example_argparse_minimal
|
minimal_cli.py
|
#!/usr/bin/env python3
"""
Minimal Argparse CLI - Baseline example that must compile
This is the absolute minimum argparse example:
- Single positional argument
- Single optional argument
- No subcommands
- Minimal handler
Purpose: Baseline CLI that depyler must handle correctly.
"""
import argparse
def main():
    """Minimal argparse entry point: one positional, one boolean flag.

    Echoes the positional argument, uppercased when -u/--upper is given.
    """
    parser = argparse.ArgumentParser(
        description="Minimal CLI example",
        prog="minimal_cli.py",
    )
    parser.add_argument("input", help="Input value")
    parser.add_argument(
        "-u",
        "--upper",
        action="store_true",
        help="Convert to uppercase",
    )
    ns = parser.parse_args()
    text = ns.input
    print(text.upper() if ns.upper else text)
if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_argparse_minimal/minimal_cli.py (942 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_argparse_minimal/minimal_cli.rs (582 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_argparse_minimal/Cargo.toml (1 dependencies)
⏱️ Parse time: 48ms
📊 Throughput: 19.0 KB/s
⏱️ Total time: 48ms
| true
|
argparse_minimal
| 48
| 6
|
[
"context_manager"
] | 0.652
| null |
example_argparse_minimal
|
test_minimal_cli.py
|
"""
Test suite for minimal_cli.py
Baseline CLI that must work with depyler
Following extreme TDD methodology.
"""
import subprocess
from pathlib import Path
import pytest
SCRIPT = Path(__file__).parent / "minimal_cli.py"
def run_cli(*args):
    """Run minimal_cli.py with *args*, capturing stdout/stderr as text."""
    argv = ["python3", str(SCRIPT), *args]
    return subprocess.run(argv, capture_output=True, text=True)
class TestMinimalCLI:
    """End-to-end checks for the minimal argparse CLI."""
    def test_help_flag(self):
        """--help exits 0 and mentions both arguments."""
        res = run_cli("--help")
        assert res.returncode == 0
        assert "usage:" in res.stdout.lower()
        assert "input" in res.stdout.lower()
        assert "--upper" in res.stdout
    def test_basic_execution(self):
        """Plain input is echoed back."""
        res = run_cli("hello")
        assert res.returncode == 0
        assert "hello" in res.stdout
    def test_uppercase_short_flag(self):
        """-u uppercases the input."""
        res = run_cli("-u", "hello")
        assert res.returncode == 0
        assert "HELLO" in res.stdout
    def test_uppercase_long_flag(self):
        """--upper uppercases the input."""
        res = run_cli("--upper", "hello")
        assert res.returncode == 0
        assert "HELLO" in res.stdout
    def test_missing_input(self):
        """Missing positional argument is an error."""
        res = run_cli()
        assert res.returncode != 0
        assert "required" in res.stderr.lower() or "argument" in res.stderr.lower()
    @pytest.mark.parametrize(
        "input_val,expected",
        [
            ("hello", "hello"),
            ("WORLD", "WORLD"),
            ("MixedCase", "MixedCase"),
            ("123", "123"),
            ("", ""),
        ],
    )
    def test_various_inputs(self, input_val, expected):
        """Inputs pass through unmodified without the flag."""
        res = run_cli(input_val)
        assert res.returncode == 0
        assert expected in res.stdout
    @pytest.mark.parametrize(
        "input_val,expected",
        [
            ("hello", "HELLO"),
            ("world", "WORLD"),
            ("MixedCase", "MIXEDCASE"),
            ("123", "123"),
        ],
    )
    def test_uppercase_various(self, input_val, expected):
        """-u uppercases a range of inputs."""
        res = run_cli("-u", input_val)
        assert res.returncode == 0
        assert expected in res.stdout
    def test_special_characters(self):
        """Punctuation and spaces survive the round trip."""
        res = run_cli("hello world!")
        assert res.returncode == 0
        assert "hello world!" in res.stdout
    def test_unicode(self):
        """Non-ASCII input is preserved."""
        res = run_cli("hello 世界")
        assert res.returncode == 0
        assert "hello 世界" in res.stdout
    def test_output_ends_newline(self):
        """print() terminates output with a newline."""
        res = run_cli("test")
        assert res.returncode == 0
        assert res.stdout.endswith("\n")
    def test_stderr_empty_on_success(self):
        """Successful runs write nothing to stderr."""
        res = run_cli("test")
        assert res.returncode == 0
        assert res.stderr == ""
    def test_deterministic(self):
        """Repeated runs produce identical output."""
        runs = [run_cli("test") for _ in range(3)]
        assert all(r.returncode == 0 for r in runs)
        assert all(r.stdout == runs[0].stdout for r in runs)
    def test_flag_before_input(self):
        """Flag position before the positional works."""
        res = run_cli("-u", "hello")
        assert res.returncode == 0
        assert "HELLO" in res.stdout
    def test_flag_after_input(self):
        """Flag position after the positional works."""
        res = run_cli("hello", "-u")
        assert res.returncode == 0
        assert "HELLO" in res.stdout
    def test_invalid_flag(self):
        """Unknown flags are rejected with a nonzero exit."""
        res = run_cli("--invalid", "test")
        assert res.returncode != 0
        assert "unrecognized" in res.stderr.lower() or "invalid" in res.stderr.lower()
| false
|
argparse_minimal
| 138
| 0
|
[
"context_manager",
"class_definition",
"stdin_usage",
"decorator"
] | 0.652
|
Performance Warnings
══════════════════════════════════════════════════
[1] [Medium] Large value 'args' passed by copy
Location: run_cli, line 0
Impact: Complexity: O(n), Scales: Yes, Hot path: No
Why: Passing large values by copy is inefficient
Fix: Consider passing by reference (&) or using Box/Arc for large types
Summary: Found 1 warnings (0 critical, 0 high severity)
Profiling Report
══════════════════════════════════════════════════
Summary
Total estimated instructions:
|
|
example_array
|
array_tool.py
|
#!/usr/bin/env python3
"""Array Example - Aggregate operations CLI."""
import argparse
def main():
    """CLI entry point: `sum`/`product` over three integers."""
    parser = argparse.ArgumentParser(description="Array operations tool")
    commands = parser.add_subparsers(dest="cmd", required=True)
    # Both subcommands share the same three positional ints.
    for name in ("sum", "product"):
        sub = commands.add_parser(name)
        for field in ("a", "b", "c"):
            sub.add_argument(field, type=int)
    ns = parser.parse_args()
    if ns.cmd == "sum":
        print(ns.a + ns.b + ns.c)
    elif ns.cmd == "product":
        print(ns.a * ns.b * ns.c)
if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_array/array_tool.py (718 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_array/array_tool.rs (1043 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_array/Cargo.toml (1 dependencies)
⏱️ Parse time: 48ms
📊 Throughput: 14.5 KB/s
⏱️ Total time: 48ms
| true
|
array
| 29
| 6
|
[] | 0
| null |
example_array
|
test_array_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for array CLI."""
import subprocess
SCRIPT = "array_tool.py"
def run(args):
    """Run array_tool.py in this example's directory, capturing output."""
    return subprocess.run(
        ["python3", SCRIPT] + args,
        capture_output=True,
        text=True,
        cwd=__file__.rsplit("/", 1)[0],
    )
class TestSum:
    def test_sum(self):
        r = run(["sum", "1", "2", "3"])
        assert r.returncode == 0 and "6" in r.stdout
class TestProduct:
    def test_product(self):
        r = run(["product", "2", "3", "4"])
        assert r.returncode == 0 and "24" in r.stdout
class TestHelp:
    def test_help(self):
        assert run(["--help"]).returncode == 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_array/test_array_tool.py (566 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_array/test_array_tool.rs (2004 bytes)
⏱️ Parse time: 49ms
📊 Throughput: 11.1 KB/s
⏱️ Total time: 49ms
| true
|
array
| 15
| 5
|
[
"class_definition"
] | 0.612
| null |
example_ascii
|
ascii_tool.py
|
#!/usr/bin/env python3
"""Ascii Example - ASCII operations CLI."""
import argparse
def main():
    """CLI entry point: ASCII code/character conversions."""
    parser = argparse.ArgumentParser(description="ASCII operations tool")
    commands = parser.add_subparsers(dest="cmd", required=True)
    upper_cmd = commands.add_parser("upper")
    upper_cmd.add_argument("code", type=int)
    lower_cmd = commands.add_parser("lower")
    lower_cmd.add_argument("code", type=int)
    digit_cmd = commands.add_parser("digit")
    digit_cmd.add_argument("n", type=int)
    ns = parser.parse_args()
    if ns.cmd == "upper":
        print(chr(ns.code))
    elif ns.cmd == "lower":
        print(chr(ns.code))
    elif ns.cmd == "digit":
        # ASCII code of the digit character: ord('0') + n.
        print(ord("0") + ns.n)
if __name__ == "__main__":
    main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_ascii/ascii_tool.py (695 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_ascii/ascii_tool.rs (1046 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_ascii/Cargo.toml (1 dependencies)
⏱️ Parse time: 49ms
📊 Throughput: 13.7 KB/s
⏱️ Total time: 49ms
| true
|
ascii
| 28
| 6
|
[] | 0
| null |
example_ascii
|
test_ascii_tool.py
|
"""Tests for ascii_tool - EXTREME TDD."""
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "ascii_tool.py"
def run(cmd):
    """Invoke ascii_tool.py with *cmd* split on whitespace; capture output."""
    argv = ["python3", str(SCRIPT), *cmd.split()]
    return subprocess.run(argv, capture_output=True, text=True)
def test_upper():
    proc = run("upper 65")
    assert proc.returncode == 0
    assert proc.stdout.strip() == "A"
def test_lower():
    proc = run("lower 97")
    assert proc.returncode == 0
    assert proc.stdout.strip() == "a"
def test_digit():
    proc = run("digit 5")
    assert proc.returncode == 0
    assert proc.stdout.strip() == "53"
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_ascii/test_ascii_tool.py (605 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_ascii/test_ascii_tool.rs (1813 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_ascii/Cargo.toml (2 dependencies)
⏱️ Parse time: 48ms
📊 Throughput: 12.2 KB/s
⏱️ Total time: 48ms
| true
|
ascii
| 32
| 6
|
[] | 0
| null |
example_async_basic
|
async_basic_cli.py
|
#!/usr/bin/env python3
"""Async Basic CLI.
Basic async/await patterns and coroutine functions.
"""
import argparse
import asyncio
import sys
async def async_identity(value: int) -> int:
    """Coroutine that resolves to its argument unchanged."""
    return value
async def async_add(a: int, b: int) -> int:
    """Coroutine that resolves to a + b."""
    return a + b
async def async_multiply(a: int, b: int) -> int:
    """Coroutine that resolves to a * b."""
    return a * b
async def async_delay(delay_ms: int) -> int:
    """Sleep for delay_ms milliseconds, then resolve to delay_ms."""
    seconds = delay_ms / 1000.0
    await asyncio.sleep(seconds)
    return delay_ms
async def async_chain(value: int) -> int:
    """Pipeline: identity -> add 10 -> multiply by 2."""
    step = await async_identity(value)
    step = await async_add(step, 10)
    return await async_multiply(step, 2)
async def async_conditional(value: int) -> str:
    """Classify the sign of the awaited value."""
    resolved = await async_identity(value)
    if resolved > 0:
        return "positive"
    if resolved < 0:
        return "negative"
    return "zero"
async def async_loop(count: int) -> int:
    """Sum 0..count-1, one awaited addition per step."""
    acc = 0
    for step in range(count):
        acc = await async_add(acc, step)
    return acc
async def async_early_return(values: list[int]) -> int:
    """Resolve to the first negative value encountered, else 0."""
    for item in values:
        seen = await async_identity(item)
        if seen < 0:
            return seen
    return 0
async def async_try_operation(value: int) -> int:
    """Double a non-negative value; raise ValueError for negatives."""
    if value < 0:
        raise ValueError(f"Negative value: {value}")
    return await async_identity(value * 2)
async def async_safe_operation(value: int) -> tuple[bool, int]:
    """Wrap async_try_operation: (True, result) on success, (False, 0) on ValueError."""
    try:
        outcome = await async_try_operation(value)
    except ValueError:
        return (False, 0)
    return (True, outcome)
async def async_map(values: list[int], transform: int) -> list[int]:
    """Add *transform* to every element via awaited additions."""
    mapped: list[int] = []
    for item in values:
        mapped.append(await async_add(item, transform))
    return mapped
async def async_filter_positive(values: list[int]) -> list[int]:
    """Keep only the strictly positive awaited values, in order."""
    kept: list[int] = []
    for item in values:
        candidate = await async_identity(item)
        if candidate > 0:
            kept.append(candidate)
    return kept
async def async_reduce(values: list[int], initial: int) -> int:
    """Fold the values with awaited addition, starting from *initial*."""
    acc = initial
    for item in values:
        acc = await async_add(acc, item)
    return acc
async def async_find_first(values: list[int], target: int) -> int:
    """Index of the first awaited value equal to *target*, or -1."""
    for idx, item in enumerate(values):
        if await async_identity(item) == target:
            return idx
    return -1
async def async_any_match(values: list[int], target: int) -> bool:
    """True if any awaited value equals *target*."""
    for item in values:
        if await async_identity(item) == target:
            return True
    return False
async def async_all_positive(values: list[int]) -> bool:
    """True only if every awaited value is strictly positive."""
    for item in values:
        if await async_identity(item) <= 0:
            return False
    return True
async def async_count_matching(values: list[int], predicate_value: int) -> int:
    """Count awaited values strictly greater than *predicate_value*."""
    matches = 0
    for item in values:
        if await async_identity(item) > predicate_value:
            matches += 1
    return matches
async def async_partition(values: list[int], pivot: int) -> tuple[list[int], list[int]]:
    """Split awaited values into (< pivot, >= pivot) lists, preserving order."""
    below: list[int] = []
    at_or_above: list[int] = []
    for item in values:
        resolved = await async_identity(item)
        # Route each value to the matching bucket.
        (below if resolved < pivot else at_or_above).append(resolved)
    return (below, at_or_above)
async def async_sum_squares(values: list[int]) -> int:
    """Sum of x*x over the values, each step via awaited arithmetic."""
    acc = 0
    for item in values:
        squared = await async_multiply(item, item)
        acc = await async_add(acc, squared)
    return acc
async def async_fibonacci(n: int) -> int:
    """n-th Fibonacci number, each addition awaited."""
    if n <= 1:
        return n
    prev, curr = 0, 1
    for _ in range(2, n + 1):
        prev, curr = curr, await async_add(prev, curr)
    return curr
async def async_factorial(n: int) -> int:
    """n!, each multiplication awaited; n <= 1 yields 1."""
    if n <= 1:
        return 1
    product = 1
    for factor in range(2, n + 1):
        product = await async_multiply(product, factor)
    return product
def run_async(coro: object) -> object:
    """Drive a coroutine to completion on a fresh event loop and return its result."""
    return asyncio.run(coro)  # type: ignore
def main() -> int:
    """CLI dispatcher for the async demo commands."""
    parser = argparse.ArgumentParser(description="Async basic CLI")
    commands = parser.add_subparsers(dest="command", help="Commands")

    chain_cmd = commands.add_parser("chain", help="Chain async operations")
    chain_cmd.add_argument("value", type=int)
    sum_cmd = commands.add_parser("sum", help="Async sum")
    sum_cmd.add_argument("values", type=int, nargs="+")
    fib_cmd = commands.add_parser("fibonacci", help="Async fibonacci")
    fib_cmd.add_argument("n", type=int)
    fact_cmd = commands.add_parser("factorial", help="Async factorial")
    fact_cmd.add_argument("n", type=int)

    ns = parser.parse_args()
    if ns.command == "chain":
        print(f"Result: {run_async(async_chain(ns.value))}")
    elif ns.command == "sum":
        print(f"Sum: {run_async(async_reduce(ns.values, 0))}")
    elif ns.command == "fibonacci":
        print(f"Fibonacci({ns.n}): {run_async(async_fibonacci(ns.n))}")
    elif ns.command == "factorial":
        print(f"Factorial({ns.n}): {run_async(async_factorial(ns.n))}")
    else:
        # No subcommand supplied: show usage.
        parser.print_help()
    return 0
if __name__ == "__main__":
    sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_async_basic/async_basic_cli.py (6186 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_async_basic/async_basic_cli.rs (11954 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_async_basic/Cargo.toml (1 dependencies)
⏱️ Parse time: 55ms
📊 Throughput: 109.1 KB/s
⏱️ Total time: 55ms
| true
|
async_basic
| 241
| 6
|
[
"async_await",
"context_manager",
"exception_handling",
"functools"
] | 0.946
| null |
example_async_basic
|
test_async_basic_cli.py
|
"""Tests for async_basic_cli.py"""
import pytest
from async_basic_cli import (
async_add,
async_all_positive,
async_any_match,
async_chain,
async_conditional,
async_count_matching,
async_delay,
async_early_return,
async_factorial,
async_fibonacci,
async_filter_positive,
async_find_first,
async_identity,
async_loop,
async_map,
async_multiply,
async_partition,
async_reduce,
async_safe_operation,
async_sum_squares,
async_try_operation,
run_async,
)
class TestBasicAsync:
    """Smoke tests for the primitive async helpers."""

    def test_identity(self):
        result = run_async(async_identity(42))
        assert result == 42

    def test_add(self):
        result = run_async(async_add(3, 4))
        assert result == 7

    def test_multiply(self):
        result = run_async(async_multiply(3, 4))
        assert result == 12


class TestAsyncDelay:
    """async_delay must return its input unchanged."""

    def test_delay_returns_value(self):
        result = run_async(async_delay(10))
        assert result == 10


class TestAsyncChain:
    """Chained pipeline: identity -> add 10 -> multiply by 2."""

    def test_chain(self):
        # Chain: identity(5) = 5, add(5, 10) = 15, multiply(15, 2) = 30
        result = run_async(async_chain(5))
        assert result == 30


class TestAsyncConditional:
    """Sign classification of the input."""

    def test_positive(self):
        result = run_async(async_conditional(5))
        assert result == "positive"

    def test_negative(self):
        result = run_async(async_conditional(-5))
        assert result == "negative"

    def test_zero(self):
        result = run_async(async_conditional(0))
        assert result == "zero"


class TestAsyncLoop:
    """async_loop sums the integers 0..n-1."""

    def test_loop_sum(self):
        # 0 + 1 + 2 + 3 + 4 = 10
        result = run_async(async_loop(5))
        assert result == 10

    def test_empty_loop(self):
        result = run_async(async_loop(0))
        assert result == 0
class TestAsyncEarlyReturn:
    """async_early_return yields the first negative element, else 0."""

    def test_finds_negative(self):
        result = run_async(async_early_return([1, 2, -3, 4]))
        assert result == -3

    def test_no_negative(self):
        result = run_async(async_early_return([1, 2, 3, 4]))
        assert result == 0

    def test_empty_list(self):
        result = run_async(async_early_return([]))
        assert result == 0


class TestAsyncErrorHandling:
    """Raising vs. (success, value) variants of the same fallible op."""

    def test_try_success(self):
        result = run_async(async_try_operation(5))
        assert result == 10

    def test_try_failure(self):
        with pytest.raises(ValueError):
            run_async(async_try_operation(-1))

    def test_safe_success(self):
        success, result = run_async(async_safe_operation(5))
        assert success is True
        assert result == 10

    def test_safe_failure(self):
        success, result = run_async(async_safe_operation(-1))
        assert success is False
        assert result == 0


class TestAsyncCollections:
    """map/filter/reduce over lists, executed through async helpers."""

    def test_map(self):
        result = run_async(async_map([1, 2, 3], 10))
        assert result == [11, 12, 13]

    def test_filter_positive(self):
        result = run_async(async_filter_positive([-1, 2, -3, 4]))
        assert result == [2, 4]

    def test_reduce(self):
        result = run_async(async_reduce([1, 2, 3, 4], 0))
        assert result == 10

    def test_reduce_with_initial(self):
        result = run_async(async_reduce([1, 2, 3], 100))
        assert result == 106
class TestAsyncSearch:
    """Index lookup and membership helpers."""

    def test_find_first(self):
        result = run_async(async_find_first([1, 2, 3, 4, 5], 3))
        assert result == 2

    def test_find_first_not_found(self):
        result = run_async(async_find_first([1, 2, 3], 10))
        assert result == -1

    def test_any_match_true(self):
        result = run_async(async_any_match([1, 2, 3], 2))
        assert result is True

    def test_any_match_false(self):
        result = run_async(async_any_match([1, 2, 3], 10))
        assert result is False


class TestAsyncPredicates:
    """All-positive and count-above-threshold predicates."""

    def test_all_positive_true(self):
        result = run_async(async_all_positive([1, 2, 3]))
        assert result is True

    def test_all_positive_false(self):
        result = run_async(async_all_positive([1, -2, 3]))
        assert result is False

    def test_count_matching(self):
        result = run_async(async_count_matching([1, 5, 10, 15, 20], 10))
        assert result == 2  # 15 and 20 are > 10


class TestAsyncPartition:
    """Split around a pivot: strictly-less vs greater-or-equal."""

    def test_partition(self):
        less, greater = run_async(async_partition([1, 5, 3, 8, 2, 9], 5))
        assert less == [1, 3, 2]
        assert greater == [5, 8, 9]


class TestAsyncMath:
    """Sum-of-squares, Fibonacci, and factorial helpers."""

    def test_sum_squares(self):
        # 1^2 + 2^2 + 3^2 = 1 + 4 + 9 = 14
        result = run_async(async_sum_squares([1, 2, 3]))
        assert result == 14

    def test_fibonacci(self):
        assert run_async(async_fibonacci(0)) == 0
        assert run_async(async_fibonacci(1)) == 1
        assert run_async(async_fibonacci(10)) == 55

    def test_factorial(self):
        assert run_async(async_factorial(0)) == 1
        assert run_async(async_factorial(1)) == 1
        assert run_async(async_factorial(5)) == 120


class TestEdgeCases:
    """Empty-collection and single-element behaviour."""

    def test_empty_collections(self):
        assert run_async(async_map([], 10)) == []
        assert run_async(async_filter_positive([])) == []
        assert run_async(async_reduce([], 0)) == 0
        assert run_async(async_find_first([], 1)) == -1
        assert run_async(async_any_match([], 1)) is False
        assert run_async(async_all_positive([])) is True

    def test_single_element(self):
        assert run_async(async_map([5], 10)) == [15]
        assert run_async(async_reduce([5], 0)) == 5
        assert run_async(async_find_first([5], 5)) == 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_async_basic/test_async_basic_cli.py (5611 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_async_basic/test_async_basic_cli.rs (13097 bytes)
⏱️ Parse time: 51ms
📊 Throughput: 106.2 KB/s
⏱️ Total time: 51ms
| true
|
async_basic
| 202
| 5
|
[
"context_manager",
"class_definition",
"functools"
] | 0.652
| null |
example_async_context
|
async_context_cli.py
|
#!/usr/bin/env python3
"""Async Context CLI.
Async context managers and resource management patterns.
"""
import argparse
import asyncio
import sys
from typing import Any
class AsyncResource:
    """Async-context-managed resource that journals every lifecycle event.

    Every open/read/write/close appends a "verb:name" entry to an internal
    log, retrievable (as a copy) via get_operations().
    """

    def __init__(self, name: str) -> None:
        self._name: str = name
        self._open: bool = False
        self._operations: list[str] = []

    def get_name(self) -> str:
        return self._name

    def is_open(self) -> bool:
        return self._open

    def get_operations(self) -> list[str]:
        # Copy so callers cannot mutate the journal.
        return self._operations.copy()

    async def __aenter__(self) -> "AsyncResource":
        self._open = True
        self._operations.append(f"opened:{self._name}")
        return self

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> bool:
        self._open = False
        self._operations.append(f"closed:{self._name}")
        return False  # never swallow exceptions

    async def read(self) -> str:
        if not self._open:
            raise RuntimeError("Resource not open")
        self._operations.append(f"read:{self._name}")
        return f"data_from_{self._name}"

    async def write(self, data: str) -> None:
        if not self._open:
            raise RuntimeError("Resource not open")
        self._operations.append(f"write:{self._name}:{data}")
class AsyncLock:
    """Minimal non-blocking lock that journals acquire/release calls.

    Not a real mutex — acquire() always succeeds immediately; the class
    exists to demonstrate the async context-manager protocol.
    """

    def __init__(self) -> None:
        self._locked: bool = False
        self._history: list[str] = []

    async def acquire(self) -> None:
        self._locked = True
        self._history.append("acquired")

    def release(self) -> None:
        self._locked = False
        self._history.append("released")

    def is_locked(self) -> bool:
        return self._locked

    def get_history(self) -> list[str]:
        return self._history.copy()

    async def __aenter__(self) -> "AsyncLock":
        await self.acquire()
        return self

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> bool:
        self.release()
        return False
class AsyncTransaction:
    """Transaction journal: auto-commits on clean exit, rolls back on error.

    __aexit__ returns True after a rollback, so exceptions raised inside
    the `async with` block are suppressed by design.
    """

    def __init__(self) -> None:
        self._operations: list[str] = []
        self._committed: bool = False
        self._rolled_back: bool = False

    async def commit(self) -> None:
        self._operations.append("commit")
        self._committed = True

    async def rollback(self) -> None:
        self._operations.append("rollback")
        self._rolled_back = True

    def add_operation(self, op: str) -> None:
        self._operations.append(op)

    def get_operations(self) -> list[str]:
        return self._operations.copy()

    def is_committed(self) -> bool:
        return self._committed

    def is_rolled_back(self) -> bool:
        return self._rolled_back

    async def __aenter__(self) -> "AsyncTransaction":
        self._operations.append("begin")
        return self

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> bool:
        if exc_type is not None:
            await self.rollback()
            return True  # Suppress the exception
        if not self._committed:
            # Only auto-commit when the caller did not commit explicitly.
            await self.commit()
        return False
class AsyncPool:
    """Fixed-size pool handing out integer connection ids 0..max_size-1.

    Acquisition is FIFO from the available list; every acquire/release is
    journaled as "acquire:N" / "release:N".
    """

    def __init__(self, max_size: int) -> None:
        self._max_size: int = max_size
        self._available: list[int] = list(range(max_size))
        self._in_use: list[int] = []
        self._history: list[str] = []

    async def acquire(self) -> int:
        if not self._available:
            raise RuntimeError("Pool exhausted")
        cid = self._available.pop(0)  # FIFO: oldest available id first
        self._in_use.append(cid)
        self._history.append(f"acquire:{cid}")
        return cid

    async def release(self, conn: int) -> None:
        # Releasing an id that is not checked out is a silent no-op.
        if conn in self._in_use:
            self._in_use.remove(conn)
            self._available.append(conn)
            self._history.append(f"release:{conn}")

    def available_count(self) -> int:
        return len(self._available)

    def in_use_count(self) -> int:
        return len(self._in_use)

    def get_history(self) -> list[str]:
        return self._history.copy()
class PooledConnection:
    """Context manager for pooled connection.

    Acquires a connection id from the pool on entry and returns it on exit.
    __aexit__ returns False, so exceptions from the body still propagate
    (the connection is released first).
    """

    def __init__(self, pool: AsyncPool) -> None:
        self._pool: AsyncPool = pool
        self._conn: int = -1  # sentinel until __aenter__ assigns a real id

    async def __aenter__(self) -> int:
        self._conn = await self._pool.acquire()
        return self._conn

    async def __aexit__(self, exc_type: Any, exc_val: Any, exc_tb: Any) -> bool:
        await self._pool.release(self._conn)
        return False
async def use_resource(name: str) -> list[str]:
    """Open the named resource, do one read and one write, return its log.

    The log is fetched after the `async with` exits, so it includes the
    closing entry.
    """
    async with AsyncResource(name) as handle:
        await handle.read()
        await handle.write("test_data")
    return handle.get_operations()
async def use_lock() -> list[str]:
    """Exercise AsyncLock as a context manager and return its journal."""
    guard = AsyncLock()
    async with guard:
        pass  # Critical section
    return guard.get_history()
async def nested_resources(name1: str, name2: str) -> tuple[list[str], list[str]]:
    """Open two resources (inner nested in outer); return both full logs.

    Logs are read after both blocks exit, so each includes its close entry.
    """
    async with AsyncResource(name1) as outer:
        async with AsyncResource(name2) as inner:
            await outer.read()
            await inner.write("nested_data")
    return (outer.get_operations(), inner.get_operations())
async def transaction_success() -> list[str]:
    """Run a transaction that exits cleanly and therefore auto-commits."""
    async with AsyncTransaction() as txn:
        txn.add_operation("insert_a")
        txn.add_operation("insert_b")
    return txn.get_operations()
async def transaction_failure() -> list[str]:
    """Failed transaction with rollback.

    The ValueError raised inside the block triggers a rollback and is then
    SUPPRESSED by AsyncTransaction.__aexit__ (which returns True), so
    control falls through to the return statement normally.
    """
    async with AsyncTransaction() as tx:
        tx.add_operation("insert_a")
        raise ValueError("Simulated failure")  # Will trigger rollback
    return tx.get_operations()
async def pool_operations(max_size: int) -> list[str]:
    """Borrow two pooled connections (nested scopes); return the pool log.

    Nesting releases in LIFO order: acquire:0, acquire:1, release:1, release:0.
    """
    pool = AsyncPool(max_size)
    async with PooledConnection(pool) as first:
        async with PooledConnection(pool) as second:
            _ = first  # Use connections
            _ = second
    return pool.get_history()
async def sequential_resources(names: list[str]) -> list[str]:
    """Read each named resource in turn, collecting payloads in order."""
    payloads: list[str] = []
    for label in names:
        async with AsyncResource(label) as handle:
            payloads.append(await handle.read())
    return payloads
async def resource_with_error(name: str, should_fail: bool) -> tuple[bool, list[str]]:
    """Resource that may encounter an error.

    Returns (success, operations). On failure the RuntimeError raised inside
    the block propagates (AsyncResource.__aexit__ returns False), the
    resource is still closed by __aexit__, and the handler reads the log via
    `res`, which stays bound after the `async with` block ends.
    """
    try:
        async with AsyncResource(name) as res:
            await res.read()
            if should_fail:
                raise RuntimeError("Intentional failure")
            await res.write("success")
        return (True, res.get_operations())
    except RuntimeError:
        # NOTE(review): `res` would be unbound here only if AsyncResource()
        # itself raised, which the current constructor cannot do.
        return (False, res.get_operations())
async def scoped_lock_operations(operations: list[str]) -> list[str]:
    """Run each operation inside its own lock scope, tagging it as locked."""
    mutex = AsyncLock()
    labelled: list[str] = []
    for op in operations:
        async with mutex:
            labelled.append(f"locked:{op}")
    return labelled
def run_async(coro: object) -> object:
    """Drive a coroutine to completion on a fresh event loop.

    Thin wrapper around asyncio.run(); annotated with `object` so callers
    don't need asyncio types in their signatures (hence the type-ignore
    on the call).
    """
    return asyncio.run(coro)  # type: ignore
def main() -> int:
    """Parse CLI arguments and dispatch to the async context demos.

    Always returns 0; unknown/missing subcommands print the help text.
    """
    parser = argparse.ArgumentParser(description="Async context CLI")
    sub = parser.add_subparsers(dest="command", help="Commands")

    res_cmd = sub.add_parser("resource", help="Use async resource")
    res_cmd.add_argument("name")

    tx_cmd = sub.add_parser("transaction", help="Run transaction")
    tx_cmd.add_argument("--fail", action="store_true")

    pool_cmd = sub.add_parser("pool", help="Test connection pool")
    pool_cmd.add_argument("--size", type=int, default=5)

    args = parser.parse_args()
    if args.command == "resource":
        print(f"Operations: {run_async(use_resource(args.name))}")
    elif args.command == "transaction":
        coro = transaction_failure() if args.fail else transaction_success()
        print(f"Operations: {run_async(coro)}")
    elif args.command == "pool":
        print(f"Operations: {run_async(pool_operations(args.size))}")
    else:
        parser.print_help()
    return 0


if __name__ == "__main__":
    sys.exit(main())
| false
|
async_context
| 301
| 0
|
[
"async_await",
"context_manager",
"class_definition",
"exception_handling",
"multiprocessing"
] | 0.946
|
Error: Unsupported type annotation: Constant(ExprConstant { range: 429..444, value: Str("AsyncResource"), kind: None })
|
|
example_async_context
|
test_async_context_cli.py
|
"""Tests for async_context_cli.py"""
import pytest
from async_context_cli import (
AsyncLock,
AsyncPool,
AsyncResource,
AsyncTransaction,
PooledConnection,
nested_resources,
pool_operations,
resource_with_error,
run_async,
scoped_lock_operations,
sequential_resources,
transaction_failure,
transaction_success,
use_lock,
use_resource,
)
class TestAsyncResource:
    """Lifecycle, journaling, and closed-state errors of AsyncResource."""

    def test_open_close(self):
        async def test():
            async with AsyncResource("test") as res:
                assert res.is_open()
                assert res.get_name() == "test"
            assert not res.is_open()
        run_async(test())

    def test_operations(self):
        async def test():
            async with AsyncResource("test") as res:
                await res.read()
                await res.write("data")
            ops = res.get_operations()
            assert "opened:test" in ops
            assert "read:test" in ops
            assert "write:test:data" in ops
            assert "closed:test" in ops
        run_async(test())

    def test_read_when_closed(self):
        async def test():
            res = AsyncResource("test")
            with pytest.raises(RuntimeError):
                await res.read()
        run_async(test())


class TestAsyncLock:
    """Acquire/release behaviour and journaling of AsyncLock."""

    def test_acquire_release(self):
        async def test():
            lock = AsyncLock()
            async with lock:
                assert lock.is_locked()
            assert not lock.is_locked()
        run_async(test())

    def test_history(self):
        async def test():
            lock = AsyncLock()
            async with lock:
                pass
            history = lock.get_history()
            assert history == ["acquired", "released"]
        run_async(test())
class TestAsyncTransaction:
    """Commit-on-success, rollback-on-error semantics."""

    def test_commit(self):
        async def test():
            async with AsyncTransaction() as tx:
                tx.add_operation("insert")
            assert tx.is_committed()
            assert not tx.is_rolled_back()
        run_async(test())

    def test_explicit_commit(self):
        async def test():
            async with AsyncTransaction() as tx:
                tx.add_operation("insert")
                await tx.commit()
            ops = tx.get_operations()
            assert ops.count("commit") == 1  # Only committed once
        run_async(test())

    def test_rollback_on_error(self):
        async def test():
            try:
                async with AsyncTransaction() as tx:
                    tx.add_operation("insert")
                    raise ValueError("Test error")
            except ValueError:
                pass  # Should be suppressed
            assert tx.is_rolled_back()
            assert not tx.is_committed()
        run_async(test())

    def test_operations(self):
        async def test():
            async with AsyncTransaction() as tx:
                tx.add_operation("a")
                tx.add_operation("b")
            ops = tx.get_operations()
            assert "begin" in ops
            assert "a" in ops
            assert "b" in ops
            assert "commit" in ops
        run_async(test())


class TestAsyncPool:
    """Counting and exhaustion behaviour of the connection pool."""

    def test_acquire_release(self):
        async def test():
            pool = AsyncPool(3)
            assert pool.available_count() == 3
            conn = await pool.acquire()
            assert pool.available_count() == 2
            assert pool.in_use_count() == 1
            await pool.release(conn)
            assert pool.available_count() == 3
            assert pool.in_use_count() == 0
        run_async(test())

    def test_pool_exhaustion(self):
        async def test():
            pool = AsyncPool(2)
            await pool.acquire()
            await pool.acquire()
            with pytest.raises(RuntimeError):
                await pool.acquire()
        run_async(test())


class TestPooledConnection:
    """Borrow/return accounting through the context manager."""

    def test_context_manager(self):
        async def test():
            pool = AsyncPool(5)
            async with PooledConnection(pool) as conn:
                assert pool.in_use_count() == 1
                _ = conn
            assert pool.in_use_count() == 0
        run_async(test())
class TestUseResource:
    """use_resource performs open/read/write/close in order."""

    def test_use_resource(self):
        ops = run_async(use_resource("myres"))
        assert "opened:myres" in ops
        assert "read:myres" in ops
        assert "write:myres:test_data" in ops
        assert "closed:myres" in ops


class TestUseLock:
    """use_lock journals exactly one acquire/release pair."""

    def test_use_lock(self):
        history = run_async(use_lock())
        assert history == ["acquired", "released"]


class TestNestedResources:
    """Both nested resources record their own full lifecycles."""

    def test_nested(self):
        ops1, ops2 = run_async(nested_resources("outer", "inner"))
        assert "opened:outer" in ops1
        assert "read:outer" in ops1
        assert "closed:outer" in ops1
        assert "opened:inner" in ops2
        assert "write:inner:nested_data" in ops2
        assert "closed:inner" in ops2


class TestTransactions:
    """End-to-end success and failure transaction flows."""

    def test_success(self):
        ops = run_async(transaction_success())
        assert "begin" in ops
        assert "insert_a" in ops
        assert "insert_b" in ops
        assert "commit" in ops

    def test_failure(self):
        ops = run_async(transaction_failure())
        assert "begin" in ops
        assert "insert_a" in ops
        assert "rollback" in ops


class TestPoolOperations:
    """Nested pooled connections release in LIFO order."""

    def test_operations(self):
        ops = run_async(pool_operations(5))
        # Should have acquire:0, acquire:1, release:1, release:0
        assert "acquire:0" in ops
        assert "acquire:1" in ops
        assert "release:1" in ops
        assert "release:0" in ops


class TestSequentialResources:
    """Payloads come back in input order."""

    def test_sequential(self):
        results = run_async(sequential_resources(["a", "b", "c"]))
        assert results == ["data_from_a", "data_from_b", "data_from_c"]


class TestResourceWithError:
    """resource_with_error reports a success flag plus the operation log."""

    def test_success(self):
        success, ops = run_async(resource_with_error("test", False))
        assert success is True
        assert "write:test:success" in ops

    def test_failure(self):
        success, ops = run_async(resource_with_error("test", True))
        assert success is False
        assert "read:test" in ops
        assert "closed:test" in ops


class TestScopedLockOperations:
    """Every operation gets the locked: prefix."""

    def test_scoped(self):
        results = run_async(scoped_lock_operations(["op1", "op2", "op3"]))
        assert results == ["locked:op1", "locked:op2", "locked:op3"]


class TestEdgeCases:
    """Empty and single-element inputs."""

    def test_empty_sequential(self):
        results = run_async(sequential_resources([]))
        assert results == []

    def test_single_resource(self):
        results = run_async(sequential_resources(["single"]))
        assert results == ["data_from_single"]
| false
|
async_context
| 231
| 0
|
[
"async_await",
"context_manager",
"class_definition",
"exception_handling",
"multiprocessing"
] | 0.946
|
Error: Statement type not yet supported: AsyncFunctionDef
|
|
example_async_gather
|
async_gather_cli.py
|
#!/usr/bin/env python3
"""Async Gather CLI.
Concurrent async execution patterns with asyncio.gather.
"""
import argparse
import asyncio
import sys
async def async_delay_value(value: int, delay_ms: int) -> int:
    """Sleep for delay_ms milliseconds, then hand back value unchanged."""
    seconds = delay_ms / 1000.0
    await asyncio.sleep(seconds)
    return value
async def async_compute(x: int) -> int:
    """Square x after yielding to the event loop briefly."""
    await asyncio.sleep(0.001)  # Minimal delay
    return x ** 2
async def async_may_fail(value: int, should_fail: bool) -> int:
    """Double value, or raise ValueError when should_fail is set."""
    if not should_fail:
        return value * 2
    raise ValueError(f"Failed for {value}")
async def gather_all(values: list[int]) -> list[int]:
    """Square every value concurrently; output order matches input order
    (asyncio.gather preserves argument order)."""
    results = await asyncio.gather(*(async_compute(v) for v in values))
    return list(results)
async def gather_with_exceptions(values: list[int], fail_indices: list[int]) -> list[object]:
    """Double each value concurrently; failures land in the result list.

    Positions listed in fail_indices raise, and return_exceptions=True turns
    those raises into exception objects at the matching result slots.
    """
    pending = [async_may_fail(v, i in fail_indices) for i, v in enumerate(values)]
    outcome = await asyncio.gather(*pending, return_exceptions=True)
    return list(outcome)
async def gather_first_completed(values: list[int]) -> int:
    """Get first completed result using wait.

    Spawns one task per value, waits for FIRST_COMPLETED, cancels the rest,
    and returns the result of one finished task. Which task wins is
    scheduler-dependent; callers should only rely on getting some squared
    input back.
    """
    tasks = [asyncio.create_task(async_compute(v)) for v in values]
    done, pending = await asyncio.wait(tasks, return_when=asyncio.FIRST_COMPLETED)
    # Cancel pending tasks
    for task in pending:
        task.cancel()
    # Return first completed result (done is an unordered set)
    for task in done:
        return task.result()
    return -1  # not reached for non-empty input; wait() always returns >=1 done task
async def gather_all_completed(values: list[int]) -> list[int]:
    """Square all values via asyncio.wait; result ORDER IS UNSPECIFIED
    because `done` is a set."""
    pending = [asyncio.create_task(async_compute(v)) for v in values]
    finished, _ = await asyncio.wait(pending)
    return [job.result() for job in finished]
async def gather_with_timeout(values: list[int], timeout_sec: float) -> list[int]:
    """Gather delayed values, or return [] if the batch exceeds the timeout.

    Each value v is delayed by v*10 ms, so larger inputs take longer; the
    whole gather is bounded by timeout_sec.

    Fix: catch asyncio.TimeoutError instead of the builtin TimeoutError.
    On Python < 3.11 asyncio.wait_for raises asyncio.TimeoutError, which is
    NOT a subclass of the builtin; on 3.11+ the two names are the same
    class, so this form is correct on every version.
    """
    tasks = [async_delay_value(v, v * 10) for v in values]  # Delay proportional to value
    try:
        results = await asyncio.wait_for(asyncio.gather(*tasks), timeout=timeout_sec)
        return list(results)
    except asyncio.TimeoutError:
        return []
async def concurrent_map(values: list[int], transform: int) -> list[int]:
    """Add transform to every value; each addition runs as its own task,
    and gather preserves input order."""
    async def shifted(item: int) -> int:
        await asyncio.sleep(0.001)
        return item + transform

    done = await asyncio.gather(*(shifted(v) for v in values))
    return list(done)
async def concurrent_filter_compute(values: list[int], threshold: int) -> list[int]:
    """Square the values at or above threshold; checks run concurrently.

    Relative input order of the kept values is preserved.
    """
    async def probe(v: int) -> tuple[bool, int]:
        await asyncio.sleep(0.001)
        keep = v >= threshold
        return (keep, v * v if keep else 0)

    flagged = await asyncio.gather(*[probe(v) for v in values])
    return [square for keep, square in flagged if keep]
async def fan_out_fan_in(value: int) -> int:
    """Apply three independent transforms concurrently, then sum them:
    (value + 1) + (value * 2) + (value squared)."""
    async def plus_one(v: int) -> int:
        await asyncio.sleep(0.001)
        return v + 1

    async def doubled(v: int) -> int:
        await asyncio.sleep(0.001)
        return v * 2

    async def squared(v: int) -> int:
        await asyncio.sleep(0.001)
        return v * v

    parts = await asyncio.gather(plus_one(value), doubled(value), squared(value))
    return sum(parts)
async def parallel_reduce(values: list[int]) -> int:
    """Sum values by pairwise tree reduction, one gather per tree level.

    Each round adds adjacent pairs concurrently, halving the list until one
    element remains; an odd trailing element is carried forward unchanged.
    Returns 0 for an empty list.
    """
    if len(values) == 0:
        return 0
    if len(values) == 1:
        return values[0]
    async def add(a: int, b: int) -> int:
        await asyncio.sleep(0.001)
        return a + b
    # Pair up and reduce
    current = values
    while len(current) > 1:
        tasks = []
        for i in range(0, len(current) - 1, 2):
            tasks.append(add(current[i], current[i + 1]))
        if len(current) % 2 == 1:
            # Capture last value in default argument to bind loop variable
            # (the default is evaluated now, avoiding the late-binding
            # closure pitfall where the coroutine would see a later value)
            last_val = current[-1]
            async def identity(v: int = last_val) -> int:
                return v
            tasks.append(identity())
        results = await asyncio.gather(*tasks)
        current = list(results)
    return current[0]
async def concurrent_find_any(values: list[int], target: int) -> bool:
    """True when at least one value equals target (checks run concurrently)."""
    async def equals_target(v: int) -> bool:
        await asyncio.sleep(0.001)
        return v == target

    verdicts = await asyncio.gather(*(equals_target(v) for v in values))
    return any(verdicts)
async def concurrent_all_positive(values: list[int]) -> bool:
    """True when every value is > 0 (checks run concurrently).

    Vacuously True for an empty list, matching all().
    """
    async def positive(v: int) -> bool:
        await asyncio.sleep(0.001)
        return v > 0

    verdicts = await asyncio.gather(*(positive(v) for v in values))
    return all(verdicts)
async def batch_process(values: list[int], batch_size: int) -> list[int]:
    """Square values batch_size at a time; batches run sequentially,
    items within a batch run concurrently via gather_all."""
    squared: list[int] = []
    for start in range(0, len(values), batch_size):
        chunk = values[start : start + batch_size]
        squared.extend(await gather_all(chunk))
    return squared
async def parallel_max(values: list[int]) -> int:
    """Find the maximum via pairwise tournament rounds, one gather per round.

    Returns 0 for an empty list.
    """
    if len(values) == 0:
        return 0
    if len(values) == 1:
        return values[0]
    async def max_pair(a: int, b: int) -> int:
        await asyncio.sleep(0.001)
        return a if a > b else b
    current = values
    while len(current) > 1:
        tasks = []
        for i in range(0, len(current) - 1, 2):
            tasks.append(max_pair(current[i], current[i + 1]))
        if len(current) % 2 == 1:
            # asyncio.sleep(0, result) completes immediately and evaluates to
            # its second argument — a cheap way to carry an odd trailing
            # value into the next round as an awaitable.
            tasks.append(asyncio.sleep(0, current[-1]))  # type: ignore
        results = await asyncio.gather(*tasks)
        # Every carried/compared result is an int, so this filter keeps all
        # of them; it exists to narrow the mixed result type for the checker.
        current = [r for r in results if isinstance(r, int)]
    return current[0]
async def parallel_sum(values: list[int]) -> int:
    """Return the sum of the SQUARES of values.

    gather_all squares each element concurrently (it delegates to
    async_compute), then the squares are folded serially here — e.g.
    [1, 2, 3, 4] -> 1 + 4 + 9 + 16 = 30. Returns 0 for an empty list.
    """
    if len(values) == 0:
        return 0
    results = await gather_all(values)  # Compute squares
    # Sum results
    total = 0
    for r in results:
        total += r
    return total
def run_async(coro: object) -> object:
    """Drive a coroutine to completion on a fresh event loop.

    Thin wrapper around asyncio.run(); annotated with `object` so callers
    don't need asyncio types in their signatures (hence the type-ignore
    on the call).
    """
    return asyncio.run(coro)  # type: ignore
def main() -> int:
    """Parse CLI arguments and dispatch to the gather demos.

    Always returns 0; unknown/missing subcommands print the help text.
    """
    parser = argparse.ArgumentParser(description="Async gather CLI")
    sub = parser.add_subparsers(dest="command", help="Commands")

    gather_cmd = sub.add_parser("gather", help="Gather concurrent results")
    gather_cmd.add_argument("values", type=int, nargs="+")

    fanout_cmd = sub.add_parser("fanout", help="Fan out computation")
    fanout_cmd.add_argument("value", type=int)

    batch_cmd = sub.add_parser("batch", help="Batch process")
    batch_cmd.add_argument("values", type=int, nargs="+")
    batch_cmd.add_argument("--size", type=int, default=3)

    args = parser.parse_args()
    if args.command == "gather":
        print(f"Results: {run_async(gather_all(args.values))}")
    elif args.command == "fanout":
        print(f"Combined result: {run_async(fan_out_fan_in(args.value))}")
    elif args.command == "batch":
        print(f"Batch results: {run_async(batch_process(args.values, args.size))}")
    else:
        parser.print_help()
    return 0


if __name__ == "__main__":
    sys.exit(main())
| false
|
async_gather
| 263
| 0
|
[
"async_await",
"context_manager",
"exception_handling",
"multiprocessing",
"functools"
] | 0.946
|
Error: Statement type not yet supported: AsyncFunctionDef
|
|
example_async_gather
|
test_async_gather_cli.py
|
"""Tests for async_gather_cli.py"""
import pytest
from async_gather_cli import (
async_compute,
async_delay_value,
async_may_fail,
batch_process,
concurrent_all_positive,
concurrent_filter_compute,
concurrent_find_any,
concurrent_map,
fan_out_fan_in,
gather_all,
gather_all_completed,
gather_first_completed,
gather_with_exceptions,
parallel_sum,
run_async,
)
class TestAsyncCompute:
    """async_compute squares its input."""

    def test_compute(self):
        result = run_async(async_compute(5))
        assert result == 25


class TestAsyncDelayValue:
    """The delayed value comes back unchanged."""

    def test_returns_value(self):
        result = run_async(async_delay_value(42, 10))
        assert result == 42


class TestAsyncMayFail:
    """Doubles on success, raises ValueError on demand."""

    def test_success(self):
        result = run_async(async_may_fail(5, False))
        assert result == 10

    def test_failure(self):
        with pytest.raises(ValueError):
            run_async(async_may_fail(5, True))


class TestGatherAll:
    """gather_all squares every element, preserving order."""

    def test_basic(self):
        result = run_async(gather_all([1, 2, 3, 4, 5]))
        assert result == [1, 4, 9, 16, 25]

    def test_empty(self):
        result = run_async(gather_all([]))
        assert result == []

    def test_single(self):
        result = run_async(gather_all([10]))
        assert result == [100]


class TestGatherWithExceptions:
    """return_exceptions=True turns failures into in-slot exception objects."""

    def test_no_failures(self):
        result = run_async(gather_with_exceptions([1, 2, 3], []))
        assert result == [2, 4, 6]

    def test_with_failures(self):
        result = run_async(gather_with_exceptions([1, 2, 3], [1]))
        assert result[0] == 2
        assert isinstance(result[1], ValueError)
        assert result[2] == 6


class TestGatherFirstCompleted:
    """First finisher is some squared input (scheduler-dependent)."""

    def test_returns_first(self):
        result = run_async(gather_first_completed([1, 2, 3]))
        assert result in [1, 4, 9]  # Any squared value


class TestGatherAllCompleted:
    """All squares are present; order is unspecified."""

    def test_returns_all(self):
        result = run_async(gather_all_completed([1, 2, 3]))
        assert sorted(result) == [1, 4, 9]
class TestConcurrentMap:
    """Element-wise addition, order preserved."""

    def test_add_transform(self):
        result = run_async(concurrent_map([1, 2, 3], 10))
        assert result == [11, 12, 13]

    def test_empty(self):
        result = run_async(concurrent_map([], 10))
        assert result == []


class TestConcurrentFilterCompute:
    """Keeps squares of elements at or above the threshold."""

    def test_filter_and_compute(self):
        result = run_async(concurrent_filter_compute([1, 5, 3, 8, 2], 4))
        assert sorted(result) == [25, 64]  # 5^2=25, 8^2=64


class TestFanOutFanIn:
    """Sum of the three fanned-out transforms."""

    def test_combine(self):
        # op1: 5+1=6, op2: 5*2=10, op3: 5^2=25
        # sum = 6+10+25 = 41
        result = run_async(fan_out_fan_in(5))
        assert result == 41


class TestConcurrentFindAny:
    """Membership test evaluated concurrently."""

    def test_found(self):
        result = run_async(concurrent_find_any([1, 2, 3, 4, 5], 3))
        assert result is True

    def test_not_found(self):
        result = run_async(concurrent_find_any([1, 2, 3, 4, 5], 10))
        assert result is False


class TestConcurrentAllPositive:
    """All-positive predicate evaluated concurrently."""

    def test_all_positive(self):
        result = run_async(concurrent_all_positive([1, 2, 3, 4, 5]))
        assert result is True

    def test_has_non_positive(self):
        result = run_async(concurrent_all_positive([1, -2, 3]))
        assert result is False
class TestBatchProcess:
    """Batched squaring matches unbatched results."""

    def test_batches(self):
        result = run_async(batch_process([1, 2, 3, 4, 5, 6], 2))
        assert result == [1, 4, 9, 16, 25, 36]

    def test_single_batch(self):
        result = run_async(batch_process([1, 2], 5))
        assert result == [1, 4]


class TestParallelSum:
    """parallel_sum returns the sum of SQUARES."""

    def test_sum(self):
        # Squares: 1, 4, 9, 16 -> sum = 30
        result = run_async(parallel_sum([1, 2, 3, 4]))
        assert result == 30

    def test_empty(self):
        result = run_async(parallel_sum([]))
        assert result == 0


class TestEdgeCases:
    """Empty, single, and larger inputs."""

    def test_empty_gather(self):
        result = run_async(gather_all([]))
        assert result == []

    def test_single_gather(self):
        result = run_async(gather_all([7]))
        assert result == [49]

    def test_large_batch(self):
        values = list(range(100))
        result = run_async(batch_process(values, 10))
        assert len(result) == 100
        assert result[0] == 0
        assert result[9] == 81
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_async_gather/test_async_gather_cli.py (4298 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_async_gather/test_async_gather_cli.rs (10181 bytes)
⏱️ Parse time: 51ms
📊 Throughput: 82.2 KB/s
⏱️ Total time: 51ms
| true
|
async_gather
| 162
| 5
|
[
"context_manager",
"class_definition",
"multiprocessing"
] | 0.652
| null |
example_async_iterator
|
async_iterator_cli.py
|
#!/usr/bin/env python3
"""Async Iterator CLI.
Async iterators and async generators.
"""
import argparse
import asyncio
import sys
from collections.abc import AsyncIterator
class AsyncRange:
    """Async counterpart of range(start, end, step); supports negative step."""

    def __init__(self, start: int, end: int, step: int = 1) -> None:
        self._start: int = start
        self._end: int = end
        self._step: int = step
        self._current: int = start

    def __aiter__(self) -> "AsyncRange":
        return self

    async def __anext__(self) -> int:
        if self._step > 0:
            exhausted = self._current >= self._end
        elif self._step < 0:
            exhausted = self._current <= self._end
        else:
            exhausted = False  # step == 0: never terminates, same as original test
        if exhausted:
            raise StopAsyncIteration
        value = self._current
        self._current += self._step
        return value
class AsyncCounter:
    """Async iterator yielding 0, 1, ..., limit-1."""

    def __init__(self, limit: int) -> None:
        self._limit: int = limit
        self._count: int = 0

    def __aiter__(self) -> "AsyncCounter":
        return self

    async def __anext__(self) -> int:
        if self._count < self._limit:
            current = self._count
            self._count += 1
            return current
        raise StopAsyncIteration
class AsyncFilter:
    """Async iterator passing through only source items >= min_value."""

    def __init__(self, iterable: AsyncIterator[int], min_value: int) -> None:
        self._iterable: AsyncIterator[int] = iterable
        self._min_value: int = min_value

    def __aiter__(self) -> "AsyncFilter":
        return self

    async def __anext__(self) -> int:
        # Pull from the source until an item clears the threshold;
        # source exhaustion falls through to StopAsyncIteration.
        async for candidate in self._iterable:
            if candidate >= self._min_value:
                return candidate
        raise StopAsyncIteration
class AsyncMap:
    """Async iterator adding a constant to each item of the source."""

    def __init__(self, iterable: AsyncIterator[int], addend: int) -> None:
        self._iterable: AsyncIterator[int] = iterable
        self._addend: int = addend

    def __aiter__(self) -> "AsyncMap":
        return self

    async def __anext__(self) -> int:
        # Take exactly one item from the source per call;
        # exhaustion falls through to StopAsyncIteration.
        async for item in self._iterable:
            return item + self._addend
        raise StopAsyncIteration
class AsyncTake:
    """Async iterator yielding at most n items from the source."""

    def __init__(self, iterable: AsyncIterator[int], n: int) -> None:
        self._iterable: AsyncIterator[int] = iterable
        self._remaining: int = n

    def __aiter__(self) -> "AsyncTake":
        return self

    async def __anext__(self) -> int:
        if self._remaining > 0:
            self._remaining -= 1
            return await self._iterable.__anext__()
        raise StopAsyncIteration
class AsyncSkip:
    """Async iterator that skips the first n items of the source.

    The skip happens lazily, on the first __anext__ call. A source shorter
    than n raises StopAsyncIteration during the skip; since _skipped stays
    False in that case, subsequent calls re-hit the exhausted source and
    raise again, matching the original behavior.
    """

    def __init__(self, iterable: AsyncIterator[int], n: int) -> None:
        self._iterable: AsyncIterator[int] = iterable
        self._to_skip: int = n
        self._skipped: bool = False  # True once the prefix has been consumed

    def __aiter__(self) -> "AsyncSkip":
        return self

    async def __anext__(self) -> int:
        if not self._skipped:
            # Consume and discard the prefix. The original wrapped this await
            # in a try/except StopAsyncIteration whose handler only re-raised
            # — a no-op removed here; the exception propagates identically.
            for _ in range(self._to_skip):
                await self._iterable.__anext__()
            self._skipped = True
        return await self._iterable.__anext__()
async def async_generator_range(start: int, end: int) -> AsyncIterator[int]:
    """Asynchronously yield the integers start, start+1, ..., end-1."""
    value = start
    while value < end:
        yield value
        value += 1
async def async_generator_squares(limit: int) -> AsyncIterator[int]:
    """Asynchronously yield i*i for i in 0..limit-1."""
    for base in range(limit):
        yield base ** 2
async def async_generator_fibonacci(limit: int) -> AsyncIterator[int]:
"""Async generator for Fibonacci sequence."""
a, b = 0, 1
for _ in range(limit):
yield a
a, b = b, a + b
async def async_generator_filter(source: AsyncIterator[int], threshold: int) -> AsyncIterator[int]:
"""Async generator that filters values."""
async for item in source:
if item >= threshold:
yield item
async def async_generator_map(source: AsyncIterator[int], addend: int) -> AsyncIterator[int]:
"""Async generator that transforms values."""
async for item in source:
yield item + addend
async def async_generator_take(source: AsyncIterator[int], n: int) -> AsyncIterator[int]:
"""Async generator that takes first n items."""
count = 0
async for item in source:
if count >= n:
break
yield item
count += 1
async def async_generator_skip(source: AsyncIterator[int], n: int) -> AsyncIterator[int]:
"""Async generator that skips first n items."""
count = 0
async for item in source:
if count >= n:
yield item
count += 1
async def async_generator_chain(
first: AsyncIterator[int], second: AsyncIterator[int]
) -> AsyncIterator[int]:
"""Chain two async iterators."""
async for item in first:
yield item
async for item in second:
yield item
async def async_generator_enumerate(source: AsyncIterator[int]) -> AsyncIterator[tuple[int, int]]:
"""Async enumerate."""
index = 0
async for item in source:
yield (index, item)
index += 1
async def async_generator_zip(
first: AsyncIterator[int], second: AsyncIterator[int]
) -> AsyncIterator[tuple[int, int]]:
"""Async zip of two iterators."""
while True:
try:
a = await first.__anext__()
b = await second.__anext__()
yield (a, b)
except StopAsyncIteration:
break
async def collect_async(iterator: AsyncIterator[int]) -> list[int]:
"""Collect async iterator into list."""
result: list[int] = []
async for item in iterator:
result.append(item)
return result
async def sum_async(iterator: AsyncIterator[int]) -> int:
"""Sum values from async iterator."""
total = 0
async for item in iterator:
total += item
return total
async def count_async(iterator: AsyncIterator[int]) -> int:
"""Count items in async iterator."""
count = 0
async for _ in iterator:
count += 1
return count
async def any_async(iterator: AsyncIterator[int], value: int) -> bool:
"""Check if any item equals value."""
async for item in iterator:
if item == value:
return True
return False
async def all_positive_async(iterator: AsyncIterator[int]) -> bool:
"""Check if all items are positive."""
async for item in iterator:
if item <= 0:
return False
return True
async def first_async(iterator: AsyncIterator[int]) -> int:
"""Get first item or -1 if empty."""
async for item in iterator:
return item
return -1
async def last_async(iterator: AsyncIterator[int]) -> int:
"""Get last item or -1 if empty."""
result = -1
async for item in iterator:
result = item
return result
def run_async(coro: object) -> object:
"""Run async function synchronously."""
return asyncio.run(coro) # type: ignore
def main() -> int:
parser = argparse.ArgumentParser(description="Async iterator CLI")
subparsers = parser.add_subparsers(dest="command", help="Commands")
# range
range_p = subparsers.add_parser("range", help="Async range")
range_p.add_argument("start", type=int)
range_p.add_argument("end", type=int)
# squares
squares_p = subparsers.add_parser("squares", help="Async squares")
squares_p.add_argument("limit", type=int)
# fibonacci
fib_p = subparsers.add_parser("fibonacci", help="Async fibonacci")
fib_p.add_argument("limit", type=int)
args = parser.parse_args()
if args.command == "range":
result = run_async(collect_async(async_generator_range(args.start, args.end)))
print(f"Range: {result}")
elif args.command == "squares":
result = run_async(collect_async(async_generator_squares(args.limit)))
print(f"Squares: {result}")
elif args.command == "fibonacci":
result = run_async(collect_async(async_generator_fibonacci(args.limit)))
print(f"Fibonacci: {result}")
else:
parser.print_help()
return 0
if __name__ == "__main__":
sys.exit(main())
| false
|
async_iterator
| 306
| 0
|
[
"async_await",
"generator",
"context_manager",
"class_definition",
"exception_handling"
] | 0.946
|
Error: Statement type not yet supported: AsyncFor
|
|
example_async_iterator
|
test_async_iterator_cli.py
|
"""Tests for async_iterator_cli.py"""
from async_iterator_cli import (
AsyncCounter,
AsyncRange,
all_positive_async,
any_async,
async_generator_chain,
async_generator_enumerate,
async_generator_fibonacci,
async_generator_filter,
async_generator_map,
async_generator_range,
async_generator_skip,
async_generator_squares,
async_generator_take,
async_generator_zip,
collect_async,
count_async,
first_async,
last_async,
run_async,
sum_async,
)
class TestAsyncRange:
def test_basic(self):
async def test():
results = []
async for i in AsyncRange(0, 5):
results.append(i)
return results
assert run_async(test()) == [0, 1, 2, 3, 4]
def test_with_step(self):
async def test():
results = []
async for i in AsyncRange(0, 10, 2):
results.append(i)
return results
assert run_async(test()) == [0, 2, 4, 6, 8]
def test_negative_step(self):
async def test():
results = []
async for i in AsyncRange(5, 0, -1):
results.append(i)
return results
assert run_async(test()) == [5, 4, 3, 2, 1]
def test_empty(self):
async def test():
results = []
async for i in AsyncRange(5, 5):
results.append(i)
return results
assert run_async(test()) == []
class TestAsyncCounter:
def test_basic(self):
async def test():
results = []
async for i in AsyncCounter(5):
results.append(i)
return results
assert run_async(test()) == [0, 1, 2, 3, 4]
def test_zero(self):
async def test():
results = []
async for i in AsyncCounter(0):
results.append(i)
return results
assert run_async(test()) == []
class TestAsyncGeneratorRange:
def test_basic(self):
result = run_async(collect_async(async_generator_range(0, 5)))
assert result == [0, 1, 2, 3, 4]
class TestAsyncGeneratorSquares:
def test_basic(self):
result = run_async(collect_async(async_generator_squares(5)))
assert result == [0, 1, 4, 9, 16]
class TestAsyncGeneratorFibonacci:
def test_basic(self):
result = run_async(collect_async(async_generator_fibonacci(8)))
assert result == [0, 1, 1, 2, 3, 5, 8, 13]
class TestAsyncGeneratorFilter:
def test_filter(self):
async def test():
source = async_generator_range(0, 10)
filtered = async_generator_filter(source, 5)
return await collect_async(filtered)
assert run_async(test()) == [5, 6, 7, 8, 9]
class TestAsyncGeneratorMap:
def test_map(self):
async def test():
source = async_generator_range(0, 5)
mapped = async_generator_map(source, 10)
return await collect_async(mapped)
assert run_async(test()) == [10, 11, 12, 13, 14]
class TestAsyncGeneratorTake:
def test_take(self):
async def test():
source = async_generator_range(0, 100)
taken = async_generator_take(source, 5)
return await collect_async(taken)
assert run_async(test()) == [0, 1, 2, 3, 4]
def test_take_more_than_available(self):
async def test():
source = async_generator_range(0, 3)
taken = async_generator_take(source, 10)
return await collect_async(taken)
assert run_async(test()) == [0, 1, 2]
class TestAsyncGeneratorSkip:
def test_skip(self):
async def test():
source = async_generator_range(0, 10)
skipped = async_generator_skip(source, 5)
return await collect_async(skipped)
assert run_async(test()) == [5, 6, 7, 8, 9]
class TestAsyncGeneratorChain:
def test_chain(self):
async def test():
first = async_generator_range(0, 3)
second = async_generator_range(10, 13)
chained = async_generator_chain(first, second)
return await collect_async(chained)
assert run_async(test()) == [0, 1, 2, 10, 11, 12]
class TestAsyncGeneratorEnumerate:
def test_enumerate(self):
async def test():
source = async_generator_range(10, 13)
enumerated = async_generator_enumerate(source)
results = []
async for item in enumerated:
results.append(item)
return results
assert run_async(test()) == [(0, 10), (1, 11), (2, 12)]
class TestAsyncGeneratorZip:
def test_zip(self):
async def test():
first = async_generator_range(1, 4)
second = async_generator_range(10, 13)
zipped = async_generator_zip(first, second)
results = []
async for item in zipped:
results.append(item)
return results
assert run_async(test()) == [(1, 10), (2, 11), (3, 12)]
class TestCollectAsync:
def test_collect(self):
result = run_async(collect_async(async_generator_range(0, 5)))
assert result == [0, 1, 2, 3, 4]
def test_collect_empty(self):
result = run_async(collect_async(async_generator_range(0, 0)))
assert result == []
class TestSumAsync:
def test_sum(self):
result = run_async(sum_async(async_generator_range(1, 6)))
assert result == 15 # 1+2+3+4+5
def test_sum_empty(self):
result = run_async(sum_async(async_generator_range(0, 0)))
assert result == 0
class TestCountAsync:
def test_count(self):
result = run_async(count_async(async_generator_range(0, 10)))
assert result == 10
def test_count_empty(self):
result = run_async(count_async(async_generator_range(0, 0)))
assert result == 0
class TestAnyAsync:
def test_found(self):
result = run_async(any_async(async_generator_range(0, 10), 5))
assert result is True
def test_not_found(self):
result = run_async(any_async(async_generator_range(0, 10), 100))
assert result is False
class TestAllPositiveAsync:
def test_all_positive(self):
result = run_async(all_positive_async(async_generator_range(1, 10)))
assert result is True
def test_has_non_positive(self):
async def test():
async def gen():
for i in [1, 2, -3, 4]:
yield i
return await all_positive_async(gen())
assert run_async(test()) is False
class TestFirstLastAsync:
def test_first(self):
result = run_async(first_async(async_generator_range(5, 10)))
assert result == 5
def test_first_empty(self):
result = run_async(first_async(async_generator_range(0, 0)))
assert result == -1
def test_last(self):
result = run_async(last_async(async_generator_range(5, 10)))
assert result == 9
def test_last_empty(self):
result = run_async(last_async(async_generator_range(0, 0)))
assert result == -1
class TestEdgeCases:
def test_empty_generators(self):
assert run_async(collect_async(async_generator_range(0, 0))) == []
assert run_async(collect_async(async_generator_squares(0))) == []
assert run_async(collect_async(async_generator_fibonacci(0))) == []
def test_single_element(self):
assert run_async(collect_async(async_generator_range(5, 6))) == [5]
assert run_async(sum_async(async_generator_range(5, 6))) == 5
assert run_async(first_async(async_generator_range(5, 6))) == 5
| false
|
async_iterator
| 256
| 0
|
[
"async_await",
"generator",
"class_definition"
] | 0.946
|
Error: Statement type not yet supported: AsyncFunctionDef
|
|
example_async_queue
|
async_queue_cli.py
|
#!/usr/bin/env python3
"""Async Queue CLI.
Async queue patterns for producer-consumer scenarios.
"""
import argparse
import asyncio
import sys
from typing import Generic, TypeVar
T = TypeVar("T")
class AsyncQueue(Generic[T]):
"""Simple async queue implementation."""
def __init__(self, maxsize: int = 0) -> None:
self._items: list[T] = []
self._maxsize: int = maxsize
self._closed: bool = False
async def put(self, item: T) -> None:
"""Put item into queue."""
if self._closed:
raise RuntimeError("Queue is closed")
if self._maxsize > 0:
while len(self._items) >= self._maxsize:
await asyncio.sleep(0.001) # Simple backpressure
self._items.append(item)
async def get(self) -> T:
"""Get item from queue."""
while len(self._items) == 0:
if self._closed:
raise RuntimeError("Queue is closed and empty")
await asyncio.sleep(0.001)
return self._items.pop(0)
def get_nowait(self) -> T:
"""Get item without waiting."""
if len(self._items) == 0:
raise RuntimeError("Queue is empty")
return self._items.pop(0)
def put_nowait(self, item: T) -> None:
"""Put item without waiting."""
if self._closed:
raise RuntimeError("Queue is closed")
if self._maxsize > 0 and len(self._items) >= self._maxsize:
raise RuntimeError("Queue is full")
self._items.append(item)
def qsize(self) -> int:
"""Get current queue size."""
return len(self._items)
def empty(self) -> bool:
"""Check if queue is empty."""
return len(self._items) == 0
def full(self) -> bool:
"""Check if queue is full."""
return self._maxsize > 0 and len(self._items) >= self._maxsize
def close(self) -> None:
"""Close the queue."""
self._closed = True
def is_closed(self) -> bool:
"""Check if queue is closed."""
return self._closed
class AsyncChannel(Generic[T]):
"""Async channel for communication between tasks."""
def __init__(self) -> None:
self._queue: AsyncQueue[T] = AsyncQueue()
self._producers: int = 0
self._consumers: int = 0
def add_producer(self) -> None:
"""Register a producer."""
self._producers += 1
def remove_producer(self) -> None:
"""Unregister a producer."""
self._producers -= 1
if self._producers == 0:
self._queue.close()
def add_consumer(self) -> None:
"""Register a consumer."""
self._consumers += 1
def remove_consumer(self) -> None:
"""Unregister a consumer."""
self._consumers -= 1
async def send(self, item: T) -> None:
"""Send item to channel."""
await self._queue.put(item)
async def receive(self) -> T:
"""Receive item from channel."""
return await self._queue.get()
def is_closed(self) -> bool:
"""Check if channel is closed."""
return self._queue.is_closed()
async def producer(queue: AsyncQueue[int], values: list[int]) -> int:
"""Producer that puts values into queue."""
count = 0
for value in values:
await queue.put(value)
count += 1
return count
async def consumer(queue: AsyncQueue[int], count: int) -> list[int]:
"""Consumer that gets values from queue."""
results: list[int] = []
for _ in range(count):
try:
item = await queue.get()
results.append(item)
except RuntimeError:
break
return results
async def worker(queue: AsyncQueue[int], results: list[int]) -> int:
"""Worker that processes items from queue."""
processed = 0
while True:
try:
item = queue.get_nowait()
results.append(item * 2)
processed += 1
except RuntimeError:
break
return processed
async def simple_producer_consumer(values: list[int]) -> list[int]:
"""Simple producer-consumer pattern."""
queue: AsyncQueue[int] = AsyncQueue()
# Produce all items
for v in values:
await queue.put(v)
# Consume all items
results: list[int] = []
while not queue.empty():
item = await queue.get()
results.append(item)
return results
async def bounded_queue_test(values: list[int], maxsize: int) -> list[int]:
"""Test bounded queue behavior."""
queue: AsyncQueue[int] = AsyncQueue(maxsize)
# Try to fill queue
for v in values[:maxsize]:
queue.put_nowait(v)
# Drain queue
results: list[int] = []
while not queue.empty():
results.append(queue.get_nowait())
return results
async def transform_pipeline(values: list[int]) -> list[int]:
"""Transform values through queue pipeline."""
queue1: AsyncQueue[int] = AsyncQueue()
queue2: AsyncQueue[int] = AsyncQueue()
# Stage 1: Add values to first queue
for v in values:
await queue1.put(v)
# Stage 2: Transform and move to second queue
while not queue1.empty():
item = await queue1.get()
await queue2.put(item * 2)
# Stage 3: Collect results
results: list[int] = []
while not queue2.empty():
results.append(await queue2.get())
return results
async def filter_pipeline(values: list[int], threshold: int) -> list[int]:
"""Filter values through queue pipeline."""
input_queue: AsyncQueue[int] = AsyncQueue()
output_queue: AsyncQueue[int] = AsyncQueue()
# Add input values
for v in values:
await input_queue.put(v)
# Filter
while not input_queue.empty():
item = await input_queue.get()
if item >= threshold:
await output_queue.put(item)
# Collect results
results: list[int] = []
while not output_queue.empty():
results.append(await output_queue.get())
return results
async def accumulator_pattern(values: list[int]) -> int:
"""Accumulate values from queue."""
queue: AsyncQueue[int] = AsyncQueue()
# Add values
for v in values:
await queue.put(v)
# Accumulate
total = 0
while not queue.empty():
total += await queue.get()
return total
async def batch_collector(values: list[int], batch_size: int) -> list[list[int]]:
"""Collect values into batches."""
queue: AsyncQueue[int] = AsyncQueue()
# Add values
for v in values:
await queue.put(v)
# Collect batches
batches: list[list[int]] = []
current_batch: list[int] = []
while not queue.empty():
item = await queue.get()
current_batch.append(item)
if len(current_batch) >= batch_size:
batches.append(current_batch)
current_batch = []
if current_batch:
batches.append(current_batch)
return batches
async def priority_queue_simulation(items: list[tuple[int, int]]) -> list[int]:
"""Simulate priority queue using sorted insert."""
queue: AsyncQueue[tuple[int, int]] = AsyncQueue()
# Add items (priority, value)
for item in items:
await queue.put(item)
# Drain and sort by priority
all_items: list[tuple[int, int]] = []
while not queue.empty():
all_items.append(await queue.get())
# Sort by priority (lower is higher priority)
all_items.sort(key=lambda x: x[0])
return [item[1] for item in all_items]
async def round_robin_queues(values: list[int], num_queues: int) -> list[list[int]]:
"""Distribute values across multiple queues."""
queues: list[AsyncQueue[int]] = [AsyncQueue() for _ in range(num_queues)]
# Distribute values
for i, v in enumerate(values):
await queues[i % num_queues].put(v)
# Collect from each queue
results: list[list[int]] = []
for q in queues:
queue_items: list[int] = []
while not q.empty():
queue_items.append(await q.get())
results.append(queue_items)
return results
async def dedup_queue(values: list[int]) -> list[int]:
"""Remove duplicates while maintaining order."""
queue: AsyncQueue[int] = AsyncQueue()
seen: set[int] = set()
# Add values
for v in values:
await queue.put(v)
# Dedup
results: list[int] = []
while not queue.empty():
item = await queue.get()
if item not in seen:
seen.add(item)
results.append(item)
return results
def run_async(coro: object) -> object:
"""Run async function synchronously."""
return asyncio.run(coro) # type: ignore
def main() -> int:
parser = argparse.ArgumentParser(description="Async queue CLI")
subparsers = parser.add_subparsers(dest="command", help="Commands")
# transform
transform_p = subparsers.add_parser("transform", help="Transform pipeline")
transform_p.add_argument("values", type=int, nargs="+")
# filter
filter_p = subparsers.add_parser("filter", help="Filter pipeline")
filter_p.add_argument("values", type=int, nargs="+")
filter_p.add_argument("--threshold", type=int, default=5)
# batch
batch_p = subparsers.add_parser("batch", help="Batch collector")
batch_p.add_argument("values", type=int, nargs="+")
batch_p.add_argument("--size", type=int, default=3)
args = parser.parse_args()
if args.command == "transform":
result = run_async(transform_pipeline(args.values))
print(f"Transformed: {result}")
elif args.command == "filter":
result = run_async(filter_pipeline(args.values, args.threshold))
print(f"Filtered: {result}")
elif args.command == "batch":
result = run_async(batch_collector(args.values, args.size))
print(f"Batches: {result}")
else:
parser.print_help()
return 0
if __name__ == "__main__":
sys.exit(main())
| false
|
async_queue
| 369
| 0
|
[
"async_await",
"lambda",
"class_definition",
"exception_handling"
] | 0.946
|
Type inference hints:
Hint: int for variable 'count' [Medium] (usage patterns suggest this type)
Type inference hints:
Hint: list[Any] for variable 'results' [High] (usage patterns suggest this type)
Type inference hints:
Hint: int for variable 'processed' [Medium] (usage patterns suggest this type)
Hint: int for variable 'item' [Medium] (usage patterns suggest this type)
Hint: list[Any] for variable 'results' [High] (usage patterns suggest this type)
Type inference hints:
Hint: list[Any] for
|
|
example_async_queue
|
test_async_queue_cli.py
|
"""Tests for async_queue_cli.py"""
from async_queue_cli import (
AsyncChannel,
AsyncQueue,
accumulator_pattern,
batch_collector,
bounded_queue_test,
dedup_queue,
filter_pipeline,
priority_queue_simulation,
producer,
round_robin_queues,
run_async,
simple_producer_consumer,
transform_pipeline,
worker,
)
class TestAsyncQueue:
def test_put_get(self):
async def test():
q: AsyncQueue[int] = AsyncQueue()
await q.put(1)
await q.put(2)
return [await q.get(), await q.get()]
assert run_async(test()) == [1, 2]
def test_put_get_nowait(self):
async def test():
q: AsyncQueue[int] = AsyncQueue()
q.put_nowait(1)
q.put_nowait(2)
return [q.get_nowait(), q.get_nowait()]
assert run_async(test()) == [1, 2]
def test_empty(self):
async def test():
q: AsyncQueue[int] = AsyncQueue()
empty_before = q.empty()
await q.put(1)
empty_after = q.empty()
return (empty_before, empty_after)
assert run_async(test()) == (True, False)
def test_qsize(self):
async def test():
q: AsyncQueue[int] = AsyncQueue()
sizes = [q.qsize()]
await q.put(1)
sizes.append(q.qsize())
await q.put(2)
sizes.append(q.qsize())
await q.get()
sizes.append(q.qsize())
return sizes
assert run_async(test()) == [0, 1, 2, 1]
def test_close(self):
async def test():
q: AsyncQueue[int] = AsyncQueue()
q.close()
return q.is_closed()
assert run_async(test()) is True
def test_put_after_close(self):
async def test():
q: AsyncQueue[int] = AsyncQueue()
q.close()
try:
await q.put(1)
return False
except RuntimeError:
return True
assert run_async(test()) is True
class TestBoundedQueue:
def test_full(self):
async def test():
q: AsyncQueue[int] = AsyncQueue(maxsize=2)
q.put_nowait(1)
q.put_nowait(2)
return q.full()
assert run_async(test()) is True
def test_put_nowait_full_raises(self):
async def test():
q: AsyncQueue[int] = AsyncQueue(maxsize=2)
q.put_nowait(1)
q.put_nowait(2)
try:
q.put_nowait(3)
return False
except RuntimeError:
return True
assert run_async(test()) is True
class TestAsyncChannel:
def test_send_receive(self):
async def test():
ch: AsyncChannel[int] = AsyncChannel()
await ch.send(42)
return await ch.receive()
assert run_async(test()) == 42
def test_producer_consumer_tracking(self):
async def test():
ch: AsyncChannel[int] = AsyncChannel()
ch.add_producer()
ch.add_consumer()
await ch.send(1)
await ch.send(2)
ch.remove_producer()
return ch.is_closed()
assert run_async(test()) is True
class TestProducerConsumer:
def test_producer(self):
async def test():
q: AsyncQueue[int] = AsyncQueue()
count = await producer(q, [1, 2, 3, 4, 5])
return count
assert run_async(test()) == 5
def test_simple_producer_consumer(self):
result = run_async(simple_producer_consumer([1, 2, 3, 4, 5]))
assert result == [1, 2, 3, 4, 5]
class TestWorker:
def test_worker_processes_items(self):
async def test():
q: AsyncQueue[int] = AsyncQueue()
q.put_nowait(1)
q.put_nowait(2)
q.put_nowait(3)
results: list[int] = []
processed = await worker(q, results)
return (processed, results)
count, results = run_async(test())
assert count == 3
assert results == [2, 4, 6] # Doubled values
class TestBoundedQueueTest:
def test_bounded(self):
result = run_async(bounded_queue_test([1, 2, 3, 4, 5], 3))
assert result == [1, 2, 3]
class TestTransformPipeline:
def test_transform(self):
result = run_async(transform_pipeline([1, 2, 3]))
assert result == [2, 4, 6]
class TestFilterPipeline:
def test_filter(self):
result = run_async(filter_pipeline([1, 5, 3, 8, 2], 4))
assert result == [5, 8]
class TestAccumulatorPattern:
def test_accumulate(self):
result = run_async(accumulator_pattern([1, 2, 3, 4, 5]))
assert result == 15
class TestBatchCollector:
def test_even_batches(self):
result = run_async(batch_collector([1, 2, 3, 4, 5, 6], 3))
assert result == [[1, 2, 3], [4, 5, 6]]
def test_uneven_batches(self):
result = run_async(batch_collector([1, 2, 3, 4, 5], 3))
assert result == [[1, 2, 3], [4, 5]]
def test_single_batch(self):
result = run_async(batch_collector([1, 2], 5))
assert result == [[1, 2]]
class TestPriorityQueueSimulation:
def test_priority_order(self):
items = [(3, 30), (1, 10), (2, 20)]
result = run_async(priority_queue_simulation(items))
assert result == [10, 20, 30] # Ordered by priority
class TestRoundRobinQueues:
def test_distribution(self):
result = run_async(round_robin_queues([1, 2, 3, 4, 5, 6], 3))
assert result == [[1, 4], [2, 5], [3, 6]]
class TestDedupQueue:
def test_dedup(self):
result = run_async(dedup_queue([1, 2, 2, 3, 1, 4, 3, 5]))
assert result == [1, 2, 3, 4, 5]
def test_no_duplicates(self):
result = run_async(dedup_queue([1, 2, 3]))
assert result == [1, 2, 3]
class TestEdgeCases:
def test_empty_queue(self):
result = run_async(simple_producer_consumer([]))
assert result == []
def test_single_item(self):
result = run_async(simple_producer_consumer([42]))
assert result == [42]
def test_empty_transform(self):
result = run_async(transform_pipeline([]))
assert result == []
def test_empty_batch(self):
result = run_async(batch_collector([], 3))
assert result == []
| false
|
async_queue
| 225
| 0
|
[
"async_await",
"class_definition",
"exception_handling"
] | 0.946
|
Error: Statement type not yet supported: AsyncFunctionDef
|
|
example_backup_tool
|
backup_cli.py
|
#!/usr/bin/env python3
"""Simple backup tool CLI.
Create and restore file backups with timestamps.
"""
import argparse
import hashlib
import os
import shutil
import sys
from datetime import datetime
def get_timestamp() -> str:
"""Get current timestamp for backup naming."""
return datetime.now().strftime("%Y%m%d_%H%M%S_%f")
def parse_timestamp(name: str) -> datetime | None:
"""Parse timestamp from backup name."""
# Extract timestamp pattern YYYYMMDD_HHMMSS_ffffff
parts = name.split("_")
if len(parts) < 3:
return None
try:
date_part = parts[-3]
time_part = parts[-2]
micro_part = parts[-1].split(".")[0] # Remove extension
return datetime.strptime(f"{date_part}_{time_part}_{micro_part}", "%Y%m%d_%H%M%S_%f")
except (ValueError, IndexError):
return None
def get_file_checksum(path: str) -> str:
"""Calculate SHA256 checksum of file."""
hasher = hashlib.sha256()
try:
with open(path, "rb") as f:
while chunk := f.read(65536):
hasher.update(chunk)
return hasher.hexdigest()[:16] # Short checksum
except OSError:
return ""
def create_backup_name(source: str, backup_dir: str) -> str:
"""Create backup filename with timestamp."""
basename = os.path.basename(source)
name, ext = os.path.splitext(basename)
timestamp = get_timestamp()
backup_name = f"{name}_{timestamp}{ext}"
return os.path.join(backup_dir, backup_name)
def backup_file(source: str, backup_dir: str, verify: bool = False) -> str:
"""Create a backup of a file.
Returns backup path on success, empty string on failure.
"""
if not os.path.isfile(source):
return ""
if not os.path.exists(backup_dir):
os.makedirs(backup_dir)
backup_path = create_backup_name(source, backup_dir)
try:
shutil.copy2(source, backup_path)
if verify:
src_checksum = get_file_checksum(source)
bak_checksum = get_file_checksum(backup_path)
if src_checksum != bak_checksum:
os.remove(backup_path)
return ""
return backup_path
except OSError:
return ""
def list_backups(backup_dir: str, pattern: str = "") -> list[tuple[str, datetime]]:
"""List backups in directory, optionally filtered by pattern.
Returns list of (path, timestamp) tuples sorted by date.
"""
if not os.path.isdir(backup_dir):
return []
backups = []
for entry in os.listdir(backup_dir):
if pattern and pattern not in entry:
continue
path = os.path.join(backup_dir, entry)
if not os.path.isfile(path):
continue
timestamp = parse_timestamp(entry)
if timestamp:
backups.append((path, timestamp))
return sorted(backups, key=lambda x: x[1], reverse=True)
def get_latest_backup(backup_dir: str, pattern: str = "") -> str:
"""Get the most recent backup matching pattern."""
backups = list_backups(backup_dir, pattern)
if not backups:
return ""
return backups[0][0]
def restore_backup(backup_path: str, dest: str, verify: bool = False) -> bool:
"""Restore a backup to destination."""
if not os.path.isfile(backup_path):
return False
try:
shutil.copy2(backup_path, dest)
if verify:
src_checksum = get_file_checksum(backup_path)
dst_checksum = get_file_checksum(dest)
if src_checksum != dst_checksum:
return False
return True
except OSError:
return False
def prune_backups(backup_dir: str, pattern: str, keep: int) -> int:
"""Remove old backups, keeping only the most recent N.
Returns number of backups removed.
"""
backups = list_backups(backup_dir, pattern)
if len(backups) <= keep:
return 0
removed = 0
for path, _ in backups[keep:]:
try:
os.remove(path)
removed += 1
except OSError:
pass
return removed
def main() -> int:
parser = argparse.ArgumentParser(description="Simple file backup tool")
subparsers = parser.add_subparsers(dest="command", help="Command")
# Backup command
backup_parser = subparsers.add_parser("backup", help="Create a backup")
backup_parser.add_argument("source", help="File to backup")
backup_parser.add_argument("-d", "--dest", default="./backups", help="Backup directory")
backup_parser.add_argument("--verify", action="store_true", help="Verify backup checksum")
# List command
list_parser = subparsers.add_parser("list", help="List backups")
list_parser.add_argument("-d", "--dir", default="./backups", help="Backup directory")
list_parser.add_argument("-p", "--pattern", default="", help="Filter by pattern")
# Restore command
restore_parser = subparsers.add_parser("restore", help="Restore a backup")
restore_parser.add_argument("backup", help="Backup file or 'latest'")
restore_parser.add_argument("dest", help="Destination path")
restore_parser.add_argument("-d", "--dir", default="./backups", help="Backup directory")
restore_parser.add_argument("-p", "--pattern", default="", help="Pattern for latest lookup")
restore_parser.add_argument("--verify", action="store_true", help="Verify restore checksum")
# Prune command
prune_parser = subparsers.add_parser("prune", help="Remove old backups")
prune_parser.add_argument("-d", "--dir", default="./backups", help="Backup directory")
prune_parser.add_argument("-p", "--pattern", default="", help="Filter by pattern")
prune_parser.add_argument("-k", "--keep", type=int, default=5, help="Number of backups to keep")
args = parser.parse_args()
if args.command == "backup":
result = backup_file(args.source, args.dest, args.verify)
if result:
print(f"Created backup: {result}")
return 0
print("Backup failed", file=sys.stderr)
return 1
elif args.command == "list":
backups = list_backups(args.dir, args.pattern)
if not backups:
print("No backups found")
return 0
for path, timestamp in backups:
name = os.path.basename(path)
size = os.path.getsize(path)
print(f"{timestamp.isoformat()} {size:>10} {name}")
return 0
elif args.command == "restore":
if args.backup == "latest":
backup_path = get_latest_backup(args.dir, args.pattern)
if not backup_path:
print("No backups found", file=sys.stderr)
return 1
else:
backup_path = args.backup
if restore_backup(backup_path, args.dest, args.verify):
print(f"Restored {backup_path} to {args.dest}")
return 0
print("Restore failed", file=sys.stderr)
return 1
elif args.command == "prune":
removed = prune_backups(args.dir, args.pattern, args.keep)
print(f"Removed {removed} old backups")
return 0
else:
parser.print_help()
return 1
if __name__ == "__main__":
sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_backup_tool/backup_cli.py (7237 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_backup_tool/backup_cli.rs (13411 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_backup_tool/Cargo.toml (5 dependencies)
⏱️ Parse time: 54ms
📊 Throughput: 130.0 KB/s
⏱️ Total time: 54ms
| true
|
backup_tool
| 232
| 6
|
[
"lambda",
"context_manager",
"exception_handling",
"walrus_operator"
] | 0.85
| null |
example_backup_tool
|
test_backup_cli.py
|
"""Tests for backup_cli.py"""
import os
import tempfile
import time
import pytest
from backup_cli import (
backup_file,
get_file_checksum,
get_latest_backup,
get_timestamp,
list_backups,
parse_timestamp,
prune_backups,
restore_backup,
)
@pytest.fixture
def temp_env():
"""Create temporary source file and backup directory."""
with tempfile.TemporaryDirectory() as tmpdir:
source = os.path.join(tmpdir, "source.txt")
backup_dir = os.path.join(tmpdir, "backups")
with open(source, "w") as f:
f.write("test content")
yield source, backup_dir
class TestTimestamp:
def test_get_timestamp_format(self):
ts = get_timestamp()
# Should be YYYYMMDD_HHMMSS_ffffff
assert len(ts) == 22
assert ts[8] == "_"
assert ts[15] == "_"
def test_parse_timestamp(self):
result = parse_timestamp("file_20231225_143000_123456.txt")
assert result is not None
assert result.year == 2023
assert result.month == 12
assert result.day == 25
assert result.hour == 14
assert result.minute == 30
def test_parse_invalid(self):
assert parse_timestamp("invalid") is None
assert parse_timestamp("no_timestamp.txt") is None
class TestChecksum:
def test_checksum_same_content(self, temp_env):
source, backup_dir = temp_env
# Create file with same content
other = os.path.join(os.path.dirname(source), "other.txt")
with open(other, "w") as f:
f.write("test content")
assert get_file_checksum(source) == get_file_checksum(other)
def test_checksum_different_content(self, temp_env):
source, backup_dir = temp_env
other = os.path.join(os.path.dirname(source), "other.txt")
with open(other, "w") as f:
f.write("different content")
assert get_file_checksum(source) != get_file_checksum(other)
def test_checksum_nonexistent(self):
assert get_file_checksum("/nonexistent") == ""
class TestBackupFile:
def test_backup_creates_file(self, temp_env):
source, backup_dir = temp_env
result = backup_file(source, backup_dir)
assert result != ""
assert os.path.exists(result)
assert "source" in result
def test_backup_creates_directory(self, temp_env):
source, backup_dir = temp_env
# backup_dir doesn't exist yet
assert not os.path.exists(backup_dir)
backup_file(source, backup_dir)
assert os.path.exists(backup_dir)
def test_backup_with_verify(self, temp_env):
source, backup_dir = temp_env
result = backup_file(source, backup_dir, verify=True)
assert result != ""
assert get_file_checksum(source) == get_file_checksum(result)
def test_backup_nonexistent_source(self, temp_env):
_, backup_dir = temp_env
result = backup_file("/nonexistent", backup_dir)
assert result == ""
class TestListBackups:
def test_list_empty(self, temp_env):
_, backup_dir = temp_env
backups = list_backups(backup_dir)
assert len(backups) == 0
def test_list_multiple(self, temp_env):
source, backup_dir = temp_env
backup_file(source, backup_dir)
time.sleep(0.1)
backup_file(source, backup_dir)
backups = list_backups(backup_dir)
assert len(backups) == 2
# Should be sorted newest first
assert backups[0][1] >= backups[1][1]
def test_list_with_pattern(self, temp_env):
source, backup_dir = temp_env
backup_file(source, backup_dir)
# Create another source
other = os.path.join(os.path.dirname(source), "other.txt")
with open(other, "w") as f:
f.write("other")
backup_file(other, backup_dir)
backups = list_backups(backup_dir, "source")
assert len(backups) == 1
class TestGetLatestBackup:
def test_get_latest(self, temp_env):
source, backup_dir = temp_env
backup_file(source, backup_dir)
time.sleep(0.1)
second = backup_file(source, backup_dir)
latest = get_latest_backup(backup_dir)
assert latest == second
def test_get_latest_empty(self, temp_env):
_, backup_dir = temp_env
assert get_latest_backup(backup_dir) == ""
class TestRestoreBackup:
def test_restore_success(self, temp_env):
source, backup_dir = temp_env
backup_path = backup_file(source, backup_dir)
dest = os.path.join(os.path.dirname(source), "restored.txt")
assert restore_backup(backup_path, dest) is True
assert os.path.exists(dest)
with open(dest) as f:
assert f.read() == "test content"
def test_restore_with_verify(self, temp_env):
source, backup_dir = temp_env
backup_path = backup_file(source, backup_dir)
dest = os.path.join(os.path.dirname(source), "restored.txt")
assert restore_backup(backup_path, dest, verify=True) is True
def test_restore_nonexistent(self, temp_env):
source, _ = temp_env
dest = os.path.join(os.path.dirname(source), "restored.txt")
assert restore_backup("/nonexistent", dest) is False
class TestPruneBackups:
def test_prune_keeps_recent(self, temp_env):
source, backup_dir = temp_env
# Create 5 backups
for _ in range(5):
backup_file(source, backup_dir)
time.sleep(0.1)
# Prune keeping 3
removed = prune_backups(backup_dir, "", keep=3)
assert removed == 2
backups = list_backups(backup_dir)
assert len(backups) == 3
def test_prune_nothing_to_remove(self, temp_env):
source, backup_dir = temp_env
backup_file(source, backup_dir)
backup_file(source, backup_dir)
removed = prune_backups(backup_dir, "", keep=5)
assert removed == 0
| false
|
backup_tool
| 209
| 0
|
[
"generator",
"context_manager",
"class_definition",
"decorator"
] | 0.927
|
Type inference hints:
Hint: str for variable 'source' [Medium] (usage patterns suggest this type)
Profiling Report
══════════════════════════════════════════════════
Summary
Total estimated instructions: 1
Total estimated allocations: 0
Functions analyzed: 1
Hot Paths
[1] temp_env (100.0% of execution time)
Function Metrics
🔥 temp_env 100.0% time | 1 inst | 0 alloc
Performance Predictions
• Rust's memory layout is more cache-friendly than Python (1.
|
|
example_base64
|
encode_tool.py
|
#!/usr/bin/env python3
"""Base64 Example - Encoding/decoding CLI."""
import argparse
import base64
import sys
def cmd_encode(args):
    """Read text from stdin and print its base64 encoding.

    Depyler: proven to terminate
    """
    text = sys.stdin.read().strip()
    print(base64.b64encode(text.encode()).decode())
def cmd_decode(args):
    """Read base64 text from stdin and print the decoded string.

    Depyler: proven to terminate
    """
    encoded = sys.stdin.read().strip()
    print(base64.b64decode(encoded).decode())
def cmd_urlsafe_encode(args):
    """Read text from stdin and print its URL-safe base64 encoding.

    Depyler: proven to terminate
    """
    text = sys.stdin.read().strip()
    print(base64.urlsafe_b64encode(text.encode()).decode())
def main():
    """Parse the subcommand and dispatch to the matching handler."""
    parser = argparse.ArgumentParser(description="Base64 encoding tool")
    subparsers = parser.add_subparsers(dest="command", required=True)
    for name in ("encode", "decode", "urlsafe-encode"):
        subparsers.add_parser(name)
    args = parser.parse_args()
    handlers = {
        "encode": cmd_encode,
        "decode": cmd_decode,
        "urlsafe-encode": cmd_urlsafe_encode,
    }
    handler = handlers.get(args.command)
    if handler is not None:
        handler(args)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_base64/encode_tool.py (1231 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_base64/encode_tool.rs (2467 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_base64/Cargo.toml (2 dependencies)
⏱️ Parse time: 49ms
📊 Throughput: 24.5 KB/s
⏱️ Total time: 49ms
| true
|
base64
| 48
| 6
|
[
"stdin_usage"
] | 0.566
| null |
example_base64
|
test_encode_tool.py
|
#!/usr/bin/env python3
"""EXTREME TDD: Tests for base64 CLI."""
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "encode_tool.py"


def run(args, input_text=None):
    """Run the base64 CLI with *args*, feeding *input_text* on stdin.

    Returns the ``subprocess.CompletedProcess`` with captured text output.
    """
    # Fix: the original shadowed the Path-based SCRIPT with a bare
    # "encode_tool.py" string (dead assignment) and relied on a fragile
    # __file__.rsplit("/", 1) cwd hack. Running the script by absolute
    # path needs neither.
    return subprocess.run(
        ["python3", str(SCRIPT)] + args,
        capture_output=True,
        text=True,
        input=input_text,
    )
class TestEncode:
    """'encode' subcommand: stdin text -> standard base64 on stdout."""
    def test_encode(self):
        # "hello" -> "aGVsbG8="
        result = run(["encode"], "hello")
        assert result.returncode == 0
        assert "aGVsbG8=" in result.stdout
    def test_encode_longer(self):
        # Input containing a space encodes the space byte too.
        result = run(["encode"], "hello world")
        assert result.returncode == 0
        assert "aGVsbG8gd29ybGQ=" in result.stdout
class TestDecode:
    """'decode' subcommand: stdin base64 -> plain text on stdout."""
    def test_decode(self):
        result = run(["decode"], "aGVsbG8=")
        assert result.returncode == 0
        assert "hello" in result.stdout
class TestUrlsafe:
    """'urlsafe-encode' subcommand: only checks successful exit here."""
    def test_urlsafe_encode(self):
        result = run(["urlsafe-encode"], "hello+world")
        assert result.returncode == 0
class TestHelp:
    """argparse --help exits 0 and prints usage."""
    def test_help(self):
        result = run(["--help"])
        assert result.returncode == 0
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_base64/test_encode_tool.py (1152 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_base64/test_encode_tool.rs (2705 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_base64/Cargo.toml (2 dependencies)
⏱️ Parse time: 49ms
📊 Throughput: 22.5 KB/s
⏱️ Total time: 50ms
| true
|
base64
| 39
| 6
|
[
"class_definition"
] | 0.612
| null |
example_batch_processor
|
batch_cli.py
|
#!/usr/bin/env python3
"""Batch processor CLI.
Process items in batches with progress tracking.
"""
import argparse
import json
import sys
import time
from dataclasses import dataclass
@dataclass
class BatchResult:
    """Result of processing a single batch (produced by ``process_batch``)."""
    batch_index: int  # zero-based index of the batch
    items_processed: int  # count of items that succeeded
    items_failed: int  # count of items that failed
    duration: float  # wall-clock seconds spent on the batch
    errors: list[str]  # error messages collected from failed items
@dataclass
class ProcessingStats:
    """Overall processing statistics, updated incrementally by ``process_all``.

    NOTE(review): the hand-written __init__ below takes precedence over the
    @dataclass-generated one (dataclass does not overwrite an existing
    __init__); the decorator still supplies __repr__/__eq__.
    """
    total_items: int  # total items scheduled for processing
    processed_items: int  # successes so far
    failed_items: int  # failures so far
    total_batches: int  # ceil(total_items / batch_size)
    completed_batches: int  # batches finished so far
    start_time: float  # epoch seconds when processing started
    end_time: float | None  # epoch seconds when finished; None while running
    def __init__(self, total_items: int, batch_size: int):
        self.total_items = total_items
        self.processed_items = 0
        self.failed_items = 0
        # Ceiling division without math.ceil.
        self.total_batches = (total_items + batch_size - 1) // batch_size
        self.completed_batches = 0
        self.start_time = time.time()
        self.end_time = None
    def elapsed(self) -> float:
        """Get elapsed time in seconds (live clock until end_time is set)."""
        end = self.end_time or time.time()
        return end - self.start_time
    def progress(self) -> float:
        """Get progress percentage (0-100); empty workloads report 100."""
        if self.total_items == 0:
            return 100.0
        return (self.processed_items / self.total_items) * 100
    def items_per_second(self) -> float:
        """Get processing rate; 0.0 when no time has elapsed yet."""
        elapsed = self.elapsed()
        if elapsed == 0:
            return 0.0
        return self.processed_items / elapsed
    def estimated_remaining(self) -> float:
        """Get estimated seconds remaining based on the current rate."""
        rate = self.items_per_second()
        if rate == 0:
            return 0.0
        remaining_items = self.total_items - self.processed_items
        return remaining_items / rate
    def to_dict(self) -> dict:
        """Serialize stats (including derived metrics) for JSON output."""
        return {
            "total_items": self.total_items,
            "processed_items": self.processed_items,
            "failed_items": self.failed_items,
            "total_batches": self.total_batches,
            "completed_batches": self.completed_batches,
            "elapsed_seconds": self.elapsed(),
            "progress_percent": self.progress(),
            "items_per_second": self.items_per_second(),
            "estimated_remaining_seconds": self.estimated_remaining(),
        }
def chunk_list(items: list, chunk_size: int) -> list[list]:
    """Split *items* into consecutive chunks of at most *chunk_size* each."""
    chunks = []
    for start in range(0, len(items), chunk_size):
        chunks.append(items[start : start + chunk_size])
    return chunks
def process_item_dummy(item: str, fail_rate: float = 0.0) -> tuple[bool, str]:
    """Dummy item processor that fails with probability *fail_rate*.

    Returns (success, error_message); error_message is "" on success.
    """
    import random

    failed = random.random() < fail_rate
    if failed:
        return False, f"Failed to process: {item}"
    return True, ""
def process_batch(
    batch: list,
    batch_index: int,
    processor=None,
    delay: float = 0.0,
) -> BatchResult:
    """Process one batch of items and summarize the outcome.

    *processor* defaults to process_item_dummy; *delay* sleeps after each
    item to simulate slow work.
    """
    active = processor if processor is not None else process_item_dummy
    started = time.time()
    ok_count = 0
    errors = []
    for item in batch:
        success, message = active(item)
        if success:
            ok_count += 1
        else:
            errors.append(message)
        if delay > 0:
            time.sleep(delay)
    return BatchResult(
        batch_index=batch_index,
        items_processed=ok_count,
        items_failed=len(errors),
        duration=time.time() - started,
        errors=errors,
    )
def process_all(
    items: list,
    batch_size: int,
    processor=None,
    delay: float = 0.0,
    on_batch_complete=None,
) -> tuple[ProcessingStats, list[BatchResult]]:
    """Process *items* batch by batch, accumulating statistics.

    Invokes *on_batch_complete(stats, result)* after each batch when given.
    """
    stats = ProcessingStats(len(items), batch_size)
    results = []
    for index, batch in enumerate(chunk_list(items, batch_size)):
        outcome = process_batch(batch, index, processor, delay)
        results.append(outcome)
        stats.processed_items += outcome.items_processed
        stats.failed_items += outcome.items_failed
        stats.completed_batches += 1
        if on_batch_complete:
            on_batch_complete(stats, outcome)
    stats.end_time = time.time()
    return stats, results
def format_progress(stats: ProcessingStats, width: int = 30) -> str:
    """Render *stats* as a one-line bar with percentage, counts, rate, ETA."""
    pct = stats.progress()
    done = int(width * pct / 100)
    bar = "█" * done + "░" * (width - done)
    return (
        f"[{bar}] {pct:.1f}% "
        f"({stats.processed_items}/{stats.total_items}) "
        f"{stats.items_per_second():.1f}/s "
        f"ETA: {stats.estimated_remaining():.1f}s"
    )
def generate_items(count: int, prefix: str = "item") -> list[str]:
    """Generate *count* synthetic item names of the form '<prefix>_<i>'."""
    return [prefix + "_" + str(index) for index in range(count)]
def main() -> int:
    """CLI entry point: read/generate items, batch-process, report results.

    Returns 0 on full success, 1 when any item failed.
    """
    parser = argparse.ArgumentParser(description="Batch processing tool")
    parser.add_argument("input", nargs="?", help="Input file with items (one per line)")
    parser.add_argument(
        "-n", "--count", type=int, default=100, help="Number of items to generate (if no input)"
    )
    parser.add_argument("-b", "--batch-size", type=int, default=10, help="Batch size")
    parser.add_argument("--delay", type=float, default=0.0, help="Delay per item (seconds)")
    parser.add_argument(
        "--fail-rate", type=float, default=0.0, help="Simulated failure rate (0.0 to 1.0)"
    )
    parser.add_argument("--progress", action="store_true", help="Show progress bar")
    parser.add_argument("--json", action="store_true", help="Output results as JSON")
    parser.add_argument("--dry-run", action="store_true", help="Show batch plan without processing")
    args = parser.parse_args()
    # Get items: from a file, from stdin ("-"), or synthesized.
    if args.input:
        if args.input == "-":
            items = [line.strip() for line in sys.stdin if line.strip()]
        else:
            with open(args.input) as f:
                items = [line.strip() for line in f if line.strip()]
    else:
        items = generate_items(args.count)
    if not items:
        print("No items to process")
        return 0
    batches = chunk_list(items, args.batch_size)
    if args.dry_run:
        # Dry run: print the batching plan and exit without processing.
        print(f"Total items: {len(items)}")
        print(f"Batch size: {args.batch_size}")
        print(f"Total batches: {len(batches)}")
        print("\nBatch plan:")
        for i, batch in enumerate(batches):
            print(f" Batch {i + 1}: {len(batch)} items")
        return 0
    # Closure binding the CLI fail-rate into the per-item processor.
    def processor(item):
        return process_item_dummy(item, args.fail_rate)
    # Per-batch callback that redraws the progress line in place (\r).
    def on_progress(stats, result):
        if args.progress:
            print(f"\r{format_progress(stats)}", end="", flush=True)
    stats, results = process_all(
        items,
        args.batch_size,
        processor,
        args.delay,
        on_progress,
    )
    if args.progress:
        print()  # Newline after the in-place progress bar
    if args.json:
        output = {
            "stats": stats.to_dict(),
            "batches": [
                {
                    "index": r.batch_index,
                    "processed": r.items_processed,
                    "failed": r.items_failed,
                    "duration": r.duration,
                    "errors": r.errors,
                }
                for r in results
            ],
        }
        print(json.dumps(output, indent=2))
    else:
        print("\nProcessing complete:")
        print(f" Total items: {stats.total_items}")
        print(f" Processed: {stats.processed_items}")
        print(f" Failed: {stats.failed_items}")
        print(f" Total time: {stats.elapsed():.2f}s")
        print(f" Rate: {stats.items_per_second():.1f} items/s")
    if stats.failed_items > 0:
        print(f"\nErrors ({stats.failed_items}):")
        for result in results:
            for error in result.errors[:5]:  # Limit errors shown per batch
                print(f" - {error}")
    return 0 if stats.failed_items == 0 else 1
if __name__ == "__main__":
sys.exit(main())
| false
|
batch_processor
| 279
| 0
|
[
"context_manager",
"class_definition",
"stdin_usage",
"decorator",
"multiprocessing"
] | 0.652
|
Type inference hints:
Hint: int for variable 'failed' [Medium] (usage patterns suggest this type)
Hint: int for variable 'processed' [Medium] (usage patterns suggest this type)
Hint: list[Any] for variable 'errors' [High] (usage patterns suggest this type)
Type inference hints:
Hint: list[Any] for variable 'items' [High] (usage patterns suggest this type)
Hint: list[Any] for variable 'results' [High] (usage patterns suggest this type)
Type inference hints:
Hint: str for variable 'remaining' [M
|
|
example_batch_processor
|
test_batch_cli.py
|
"""Tests for batch_cli.py"""
import time
from batch_cli import (
ProcessingStats,
chunk_list,
format_progress,
generate_items,
process_all,
process_batch,
process_item_dummy,
)
class TestChunkList:
def test_even_split(self):
items = list(range(10))
chunks = chunk_list(items, 5)
assert len(chunks) == 2
assert chunks[0] == [0, 1, 2, 3, 4]
assert chunks[1] == [5, 6, 7, 8, 9]
def test_uneven_split(self):
items = list(range(7))
chunks = chunk_list(items, 3)
assert len(chunks) == 3
assert chunks[0] == [0, 1, 2]
assert chunks[1] == [3, 4, 5]
assert chunks[2] == [6]
def test_single_item_chunks(self):
items = [1, 2, 3]
chunks = chunk_list(items, 1)
assert len(chunks) == 3
assert all(len(c) == 1 for c in chunks)
def test_larger_chunk_than_items(self):
items = [1, 2]
chunks = chunk_list(items, 10)
assert len(chunks) == 1
assert chunks[0] == [1, 2]
def test_empty_list(self):
chunks = chunk_list([], 5)
assert chunks == []
class TestProcessItemDummy:
def test_success(self):
success, error = process_item_dummy("test", fail_rate=0.0)
assert success is True
assert error == ""
def test_always_fail(self):
success, error = process_item_dummy("test", fail_rate=1.0)
assert success is False
assert "test" in error
class TestProcessBatch:
def test_all_success(self):
items = ["a", "b", "c"]
def always_succeed(item):
return True, ""
result = process_batch(items, 0, always_succeed)
assert result.items_processed == 3
assert result.items_failed == 0
assert result.batch_index == 0
def test_all_fail(self):
items = ["a", "b", "c"]
def always_fail(item):
return False, f"Failed: {item}"
result = process_batch(items, 0, always_fail)
assert result.items_processed == 0
assert result.items_failed == 3
assert len(result.errors) == 3
def test_mixed(self):
items = ["a", "b", "c"]
counter = [0]
def alternate(item):
counter[0] += 1
if counter[0] % 2 == 0:
return False, "Failed"
return True, ""
result = process_batch(items, 0, alternate)
assert result.items_processed == 2
assert result.items_failed == 1
class TestProcessingStats:
def test_init(self):
stats = ProcessingStats(100, 10)
assert stats.total_items == 100
assert stats.total_batches == 10
assert stats.processed_items == 0
def test_init_uneven(self):
stats = ProcessingStats(95, 10)
assert stats.total_batches == 10 # ceil(95/10)
def test_progress(self):
stats = ProcessingStats(100, 10)
assert stats.progress() == 0.0
stats.processed_items = 50
assert stats.progress() == 50.0
stats.processed_items = 100
assert stats.progress() == 100.0
def test_items_per_second(self):
stats = ProcessingStats(100, 10)
stats.processed_items = 50
# Manually set start time
stats.start_time = time.time() - 5 # 5 seconds ago
rate = stats.items_per_second()
assert 9.0 <= rate <= 11.0 # About 10/s
def test_estimated_remaining(self):
stats = ProcessingStats(100, 10)
stats.processed_items = 50
stats.start_time = time.time() - 5 # 5 seconds ago
remaining = stats.estimated_remaining()
assert 4.0 <= remaining <= 6.0 # About 5 seconds
def test_to_dict(self):
stats = ProcessingStats(100, 10)
stats.processed_items = 50
d = stats.to_dict()
assert d["total_items"] == 100
assert d["processed_items"] == 50
assert "progress_percent" in d
class TestProcessAll:
def test_simple(self):
items = list(range(10))
def processor(item):
return True, ""
stats, results = process_all(items, 3, processor)
assert stats.processed_items == 10
assert stats.failed_items == 0
assert len(results) == 4 # ceil(10/3)
def test_with_failures(self):
items = list(range(10))
def half_fail(item):
if item % 2 == 0:
return False, "Even number"
return True, ""
stats, results = process_all(items, 5, half_fail)
assert stats.processed_items == 5
assert stats.failed_items == 5
def test_callback(self):
items = list(range(10))
callback_count = [0]
def processor(item):
return True, ""
def callback(stats, result):
callback_count[0] += 1
process_all(items, 3, processor, 0, callback)
assert callback_count[0] == 4 # 4 batches
class TestFormatProgress:
def test_format(self):
stats = ProcessingStats(100, 10)
stats.processed_items = 50
result = format_progress(stats)
assert "50.0%" in result
assert "50/100" in result
class TestGenerateItems:
def test_generate(self):
items = generate_items(5)
assert len(items) == 5
assert all(item.startswith("item_") for item in items)
def test_custom_prefix(self):
items = generate_items(3, "task")
assert items[0] == "task_0"
| false
|
batch_processor
| 205
| 0
|
[
"class_definition",
"multiprocessing"
] | 0.612
|
Error: Expression type not yet supported: GeneratorExp { element: Binary { op: Eq, left: Call { func: "len", args: [Var("c")], kwargs: [] }, right: Literal(Int(1)) }, generators: [HirComprehension { target: "c", iter: Var("chunks"), conditions: [] }] }
|
|
example_bencode_codec
|
bencode_cli.py
|
#!/usr/bin/env python3
"""Bencode codec CLI.
Encode and decode BitTorrent bencode format.
"""
import argparse
import json
import sys
def encode_int(value: int) -> bytes:
    """Encode an integer as bencode: ``i<value>e``."""
    text = "i" + str(value) + "e"
    return text.encode("ascii")
def encode_bytes(value: bytes) -> bytes:
    """Encode raw bytes as bencode: ``<length>:<data>``."""
    prefix = str(len(value)).encode("ascii") + b":"
    return prefix + value
def encode_string(value: str) -> bytes:
    """Encode a string as bencode, using its UTF-8 byte length as prefix."""
    payload = value.encode("utf-8")
    return str(len(payload)).encode("ascii") + b":" + payload
def encode_list(value: list) -> bytes:
    """Encode a list as bencode: ``l<items>e``."""
    parts = [b"l"]
    for element in value:
        parts.append(bencode_encode(element))
    parts.append(b"e")
    return b"".join(parts)
def encode_dict(value: dict) -> bytes:
    """Encode a dict as bencode: ``d<key><value>...e``.

    Keys are stringified and emitted in sorted order.
    """
    parts = [b"d"]
    for key in sorted(value):
        parts.append(encode_string(str(key)))
        parts.append(bencode_encode(value[key]))
    parts.append(b"e")
    return b"".join(parts)
def bencode_encode(value) -> bytes:
    """Encode a Python value (int, bytes, str, list, dict) to bencode.

    Raises ValueError for unsupported types. Check order matters: bool is
    an int subclass, so it is routed to encode_int (as before).
    """
    dispatch = (
        (int, encode_int),
        (bytes, encode_bytes),
        (str, encode_string),
        (list, encode_list),
        (dict, encode_dict),
    )
    for kind, encoder in dispatch:
        if isinstance(value, kind):
            return encoder(value)
    raise ValueError(f"Cannot encode type: {type(value)}")
class BencodeDecoder:
    """Stateful bencode decoder: walks ``self.data`` via a ``pos`` cursor."""
    def __init__(self, data: bytes):
        self.data = data  # raw bencode bytes
        self.pos = 0  # current read offset into data
    def decode(self):
        """Decode the next value at ``pos``; dispatch on its leading byte.

        Raises ValueError on exhausted or malformed input.
        """
        if self.pos >= len(self.data):
            raise ValueError("Unexpected end of data")
        byte = self.data[self.pos]
        if byte == ord("i"):
            return self.decode_int()
        if byte == ord("l"):
            return self.decode_list()
        if byte == ord("d"):
            return self.decode_dict()
        if ord("0") <= byte <= ord("9"):
            # A digit starts the <length>: prefix of a string.
            return self.decode_string()
        raise ValueError(f"Invalid bencode at position {self.pos}")
    def decode_int(self) -> int:
        """Decode ``i<digits>e`` at ``pos``; raises if no 'e' terminator."""
        self.pos += 1  # Skip 'i'
        end = self.data.index(ord("e"), self.pos)
        value = int(self.data[self.pos : end].decode("ascii"))
        self.pos = end + 1
        return value
    def decode_string(self) -> str:
        """Decode ``<length>:<bytes>`` at ``pos``.

        Returns UTF-8 text, or a hex string when the payload is not
        valid UTF-8 (binary data).
        """
        colon = self.data.index(ord(":"), self.pos)
        length = int(self.data[self.pos : colon].decode("ascii"))
        self.pos = colon + 1
        value = self.data[self.pos : self.pos + length]
        self.pos += length
        try:
            return value.decode("utf-8")
        except UnicodeDecodeError:
            # Return hex for binary data
            return value.hex()
    def decode_list(self) -> list:
        """Decode ``l<items>e`` at ``pos`` into a Python list."""
        self.pos += 1  # Skip 'l'
        result = []
        while self.data[self.pos] != ord("e"):
            result.append(self.decode())
        self.pos += 1  # Skip 'e'
        return result
    def decode_dict(self) -> dict:
        """Decode ``d<key><value>...e`` at ``pos``; keys decoded as strings."""
        self.pos += 1  # Skip 'd'
        result = {}
        while self.data[self.pos] != ord("e"):
            key = self.decode_string()
            value = self.decode()
            result[key] = value
        self.pos += 1  # Skip 'e'
        return result
def bencode_decode(data: bytes):
    """Decode bencode *data* into the corresponding Python value."""
    return BencodeDecoder(data).decode()
def validate_bencode(data: bytes) -> tuple[bool, str]:
    """Check whether *data* is well-formed bencode.

    Returns (is_valid, error_message); error_message is "" when valid.
    """
    try:
        bencode_decode(data)
    except Exception as exc:
        return False, str(exc)
    return True, ""
def main() -> int:
    """CLI entry point: encode JSON->bencode, decode bencode->JSON, or validate.

    With no mode flag, detects validity and prints summary info.
    Returns 0 on success, 1 on invalid input or I/O-format errors.
    """
    parser = argparse.ArgumentParser(description="Encode and decode bencode format")
    parser.add_argument("input", nargs="?", help="Input file (- for stdin)")
    parser.add_argument("-e", "--encode", action="store_true", help="Encode JSON to bencode")
    parser.add_argument("-d", "--decode", action="store_true", help="Decode bencode to JSON")
    parser.add_argument("--validate", action="store_true", help="Validate bencode data")
    parser.add_argument("-o", "--output", help="Output file")
    args = parser.parse_args()
    # Read input: text when encoding JSON, raw bytes otherwise.
    if args.input is None or args.input == "-":
        if args.encode:
            data = sys.stdin.read()
        else:
            data = sys.stdin.buffer.read()
    else:
        if args.encode:
            with open(args.input) as f:
                data = f.read()
        else:
            with open(args.input, "rb") as f:
                data = f.read()
    # Process according to the selected mode.
    if args.validate:
        if isinstance(data, str):
            data = data.encode("utf-8")
        valid, error = validate_bencode(data)
        if valid:
            print("Valid bencode")
            return 0
        print(f"Invalid: {error}", file=sys.stderr)
        return 1
    if args.encode:
        try:
            obj = json.loads(data)
        except json.JSONDecodeError as e:
            print(f"Invalid JSON: {e}", file=sys.stderr)
            return 1
        result = bencode_encode(obj)
        if args.output:
            # Bencode is binary; write without text encoding.
            with open(args.output, "wb") as f:
                f.write(result)
        else:
            sys.stdout.buffer.write(result)
            sys.stdout.buffer.write(b"\n")
    elif args.decode:
        try:
            obj = bencode_decode(data)
        except Exception as e:
            print(f"Decode error: {e}", file=sys.stderr)
            return 1
        result = json.dumps(obj, indent=2)
        if args.output:
            with open(args.output, "w") as f:
                f.write(result)
        else:
            print(result)
    else:
        # Default: detect validity and show top-level type info.
        if isinstance(data, str):
            data = data.encode("utf-8")
        valid, error = validate_bencode(data)
        if valid:
            obj = bencode_decode(data)
            print(f"Type: {type(obj).__name__}")
            if isinstance(obj, dict):
                print(f"Keys: {list(obj.keys())}")
            elif isinstance(obj, list):
                print(f"Length: {len(obj)}")
        else:
            print(f"Invalid bencode: {error}")
            return 1
    return 0
if __name__ == "__main__":
sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_bencode_codec/bencode_cli.py (6564 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_bencode_codec/bencode_cli.rs (13992 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_bencode_codec/Cargo.toml (3 dependencies)
⏱️ Parse time: 56ms
📊 Throughput: 113.9 KB/s
⏱️ Total time: 56ms
| true
|
bencode_codec
| 238
| 6
|
[
"context_manager",
"class_definition",
"exception_handling",
"stdin_usage",
"multiprocessing"
] | 0.652
| null |
example_bencode_codec
|
test_bencode_cli.py
|
"""Tests for bencode_cli.py"""
from bencode_cli import (
bencode_decode,
bencode_encode,
encode_bytes,
encode_dict,
encode_int,
encode_list,
encode_string,
validate_bencode,
)
class TestEncodeInt:
def test_positive(self):
assert encode_int(42) == b"i42e"
def test_zero(self):
assert encode_int(0) == b"i0e"
def test_negative(self):
assert encode_int(-10) == b"i-10e"
class TestEncodeString:
def test_simple(self):
assert encode_string("hello") == b"5:hello"
def test_empty(self):
assert encode_string("") == b"0:"
def test_unicode(self):
result = encode_string("héllo")
# 6 bytes due to UTF-8 encoding of é
assert result.startswith(b"6:")
class TestEncodeBytes:
def test_simple(self):
assert encode_bytes(b"hello") == b"5:hello"
def test_binary(self):
assert encode_bytes(b"\x00\x01\x02") == b"3:\x00\x01\x02"
class TestEncodeList:
def test_simple(self):
result = encode_list([1, 2, 3])
assert result == b"li1ei2ei3ee"
def test_empty(self):
assert encode_list([]) == b"le"
def test_mixed(self):
result = encode_list(["a", 1])
assert result == b"l1:ai1ee"
class TestEncodeDict:
def test_simple(self):
result = encode_dict({"key": "value"})
assert result == b"d3:key5:valuee"
def test_empty(self):
assert encode_dict({}) == b"de"
def test_sorted_keys(self):
result = encode_dict({"b": 2, "a": 1})
# Keys should be sorted
assert result == b"d1:ai1e1:bi2ee"
class TestBencodeEncode:
def test_roundtrip_int(self):
original = 42
encoded = bencode_encode(original)
decoded = bencode_decode(encoded)
assert decoded == original
def test_roundtrip_string(self):
original = "hello world"
encoded = bencode_encode(original)
decoded = bencode_decode(encoded)
assert decoded == original
def test_roundtrip_list(self):
original = ["a", "b", "c"]
encoded = bencode_encode(original)
decoded = bencode_decode(encoded)
assert decoded == original
def test_roundtrip_dict(self):
original = {"name": "test", "value": 123}
encoded = bencode_encode(original)
decoded = bencode_decode(encoded)
assert decoded == original
def test_complex_structure(self):
original = {
"announce": "http://tracker.example.com",
"info": {
"name": "file.txt",
"length": 1024,
"pieces": ["abc", "def"],
},
}
encoded = bencode_encode(original)
decoded = bencode_decode(encoded)
assert decoded == original
class TestBencodeDecode:
def test_int(self):
assert bencode_decode(b"i42e") == 42
def test_negative_int(self):
assert bencode_decode(b"i-10e") == -10
def test_string(self):
assert bencode_decode(b"5:hello") == "hello"
def test_list(self):
assert bencode_decode(b"li1ei2ei3ee") == [1, 2, 3]
def test_dict(self):
result = bencode_decode(b"d3:key5:valuee")
assert result == {"key": "value"}
def test_nested(self):
data = b"d4:listli1ei2ee4:namei42ee"
result = bencode_decode(data)
assert result == {"list": [1, 2], "name": 42}
class TestValidateBencode:
def test_valid(self):
valid, error = validate_bencode(b"i42e")
assert valid is True
assert error == ""
def test_invalid(self):
valid, error = validate_bencode(b"invalid")
assert valid is False
assert error != ""
def test_truncated(self):
valid, error = validate_bencode(b"i42")
assert valid is False
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_bencode_codec/test_bencode_cli.py (3848 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_bencode_codec/test_bencode_cli.rs (8640 bytes)
⏱️ Parse time: 51ms
📊 Throughput: 73.1 KB/s
⏱️ Total time: 51ms
| true
|
bencode_codec
| 149
| 5
|
[
"class_definition"
] | 0.612
| null |
example_bin
|
bin_tool.py
|
#!/usr/bin/env python3
"""Bin Example - Binary operations CLI."""
import argparse
def main():
    """Parse the subcommand and run the requested binary operation."""
    parser = argparse.ArgumentParser(description="Binary operations tool")
    subs = parser.add_subparsers(dest="cmd", required=True)
    to_bin = subs.add_parser("tobin")
    to_bin.add_argument("num", type=int)
    from_bin = subs.add_parser("frombin")
    from_bin.add_argument("binstr")
    bits_cmd = subs.add_parser("bits")
    bits_cmd.add_argument("num", type=int)
    args = parser.parse_args()
    if args.cmd == "tobin":
        # Decimal -> binary digits (no '0b' prefix).
        print(format(args.num, "b"))
    elif args.cmd == "frombin":
        # Binary string -> decimal.
        print(int(args.binstr, 2))
    elif args.cmd == "bits":
        # Count bits by repeated halving (0 for non-positive input).
        total = 0
        remaining = args.num
        while remaining > 0:
            total = total + 1
            remaining = remaining // 2
        print(total)
if __name__ == "__main__":
main()
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_bin/bin_tool.py (801 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_bin/bin_tool.rs (2624 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_bin/Cargo.toml (1 dependencies)
⏱️ Parse time: 48ms
📊 Throughput: 16.3 KB/s
⏱️ Total time: 48ms
| true
|
bin
| 33
| 6
|
[] | 0
| null |
example_bin
|
test_bin_tool.py
|
"""Tests for bin_tool - EXTREME TDD."""
import subprocess
from pathlib import Path
SCRIPT = Path(__file__).parent / "bin_tool.py"
def run(cmd):
    """Run bin_tool with the whitespace-separated arguments in *cmd*."""
    argv = ["python3", str(SCRIPT), *cmd.split()]
    return subprocess.run(argv, capture_output=True, text=True)
def test_tobin():
    """tobin: decimal 10 -> binary '1010'."""
    r = run("tobin 10")
    assert r.returncode == 0
    assert r.stdout.strip() == "1010"
def test_frombin():
    """frombin: binary '1010' -> decimal 10."""
    r = run("frombin 1010")
    assert r.returncode == 0
    assert r.stdout.strip() == "10"
def test_bits():
    """bits: 255 needs 8 binary digits."""
    r = run("bits 255")
    assert r.returncode == 0
    assert r.stdout.strip() == "8"
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_bin/test_bin_tool.py (610 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_bin/test_bin_tool.rs (1820 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_bin/Cargo.toml (2 dependencies)
⏱️ Parse time: 47ms
📊 Throughput: 12.6 KB/s
⏱️ Total time: 47ms
| true
|
bin
| 32
| 6
|
[] | 0
| null |
example_binary_codec
|
binary_codec_cli.py
|
#!/usr/bin/env python3
"""Binary Codec CLI.
Binary encoding/decoding utilities (base64, hex, etc).
"""
import argparse
import base64
import sys
def encode_hex(data: bytes) -> str:
    """Encode bytes to a lowercase hex string."""
    return "".join(format(byte, "02x") for byte in data)
def decode_hex(hex_str: str) -> bytes:
    """Decode a hex string (as produced by encode_hex) into bytes."""
    decoded = bytes.fromhex(hex_str)
    return decoded
def encode_hex_upper(data: bytes) -> str:
    """Encode bytes to an uppercase hex string."""
    return "".join(format(byte, "02X") for byte in data)
def encode_base64(data: bytes) -> str:
    """Encode bytes to a standard base64 ASCII string."""
    encoded = base64.b64encode(data)
    return encoded.decode("ascii")
def decode_base64(b64_str: str) -> bytes:
    """Decode standard-alphabet base64 text into bytes."""
    return base64.b64decode(b64_str.encode("ascii"))
def encode_base64_url(data: bytes) -> str:
    """Encode *data* with the URL-safe base64 alphabet ('-' and '_')."""
    encoded = base64.urlsafe_b64encode(data)
    return encoded.decode("ascii")
def decode_base64_url(b64_str: str) -> bytes:
    """Decode URL-safe base64 text into bytes."""
    return base64.urlsafe_b64decode(b64_str.encode("ascii"))
def encode_base32(data: bytes) -> str:
    """Encode *data* as base32 text (padded with '=')."""
    return str(base64.b32encode(data), "ascii")
def decode_base32(b32_str: str) -> bytes:
    """Decode base32 text into bytes."""
    return base64.b32decode(b32_str.encode("ascii"))
def encode_base16(data: bytes) -> str:
    """Encode *data* as base16 (uppercase hex) text."""
    return str(base64.b16encode(data), "ascii")
def decode_base16(b16_str: str) -> bytes:
    """Decode base16 (uppercase hex) text into bytes."""
    return base64.b16decode(b16_str.encode("ascii"))
def bytes_to_binary_string(data: bytes) -> str:
    """Render each byte as 8 binary digits, groups separated by spaces."""
    groups = [f"{byte:08b}" for byte in data]
    return " ".join(groups)
def binary_string_to_bytes(bin_str: str) -> bytes:
    """Parse whitespace-separated binary digit groups back into bytes."""
    return bytes(int(group, 2) for group in bin_str.split())
def bytes_to_int_list(data: bytes) -> list[int]:
    """Return the byte values of *data* as a list of ints (0-255)."""
    return [int(byte) for byte in data]
def int_list_to_bytes(values: list[int]) -> bytes:
    """Pack a list of ints (each 0-255) into a bytes object."""
    return bytes(bytearray(values))
def encode_ascii_hex(text: str) -> str:
    """Hex-encode ASCII text (raises UnicodeEncodeError on non-ASCII input)."""
    ascii_bytes = text.encode("ascii")
    return ascii_bytes.hex()
def decode_ascii_hex(hex_str: str) -> str:
    """Decode a hex string into ASCII text."""
    raw = bytes.fromhex(hex_str)
    return raw.decode("ascii")
def encode_utf8_hex(text: str) -> str:
    """Hex-encode the UTF-8 byte representation of *text*."""
    utf8_bytes = text.encode("utf-8")
    return utf8_bytes.hex()
def decode_utf8_hex(hex_str: str) -> str:
    """Decode a hex string into UTF-8 text."""
    raw = bytes.fromhex(hex_str)
    return raw.decode("utf-8")
def escape_bytes(data: bytes) -> str:
    """Escape non-printable bytes (and the backslash itself) as \\xNN.

    The backslash (0x5c) must be escaped too so that unescape_bytes()
    is a true inverse: previously a literal input such as b"\\x41" was
    passed through verbatim and re-parsed as the single byte 0x41 on
    the way back, breaking the escape/unescape roundtrip.
    """
    result = []
    for b in data:
        if 32 <= b < 127 and b != 0x5C:  # printable ASCII, except the escape char
            result.append(chr(b))
        else:
            result.append(f"\\x{b:02x}")
    return "".join(result)
def unescape_bytes(text: str) -> bytes:
    """Unescape \\xNN sequences to bytes.

    Inverse of escape_bytes(): a "\\xNN" sequence (two hex digits)
    becomes the byte NN; any other character becomes its own code
    point.  A "\\x" not followed by two valid hex digits is kept
    literally, character by character.
    """
    result = bytearray()
    i = 0
    while i < len(text):
        # Candidate escape: "\x" with at least two more characters after it.
        if text[i : i + 2] == "\\x" and i + 4 <= len(text):
            hex_chars = text[i + 2 : i + 4]
            try:
                result.append(int(hex_chars, 16))
                i += 4  # consumed the whole "\xNN" sequence
                continue
            except ValueError:
                pass  # not hex digits: fall through and emit "\" literally
        # Ordinary character: its code point is the byte value.
        # NOTE(review): assumes code points < 256 -- bytearray.append
        # raises ValueError for larger ones; confirm inputs are Latin-1 safe.
        result.append(ord(text[i]))
        i += 1
    return bytes(result)
def run_length_encode(data: bytes) -> bytes:
    """Run-length encode *data* as (count, value) byte pairs.

    Runs longer than 255 bytes are split so the count fits in one byte.
    Empty input yields empty output.
    """
    out = bytearray()
    i = 0
    n = len(data)
    while i < n:
        value = data[i]
        run = 1
        while i + run < n and data[i + run] == value and run < 255:
            run += 1
        out += bytes((run, value))
        i += run
    return bytes(out)
def run_length_decode(data: bytes) -> bytes:
    """Expand (count, value) byte pairs produced by run_length_encode.

    A trailing odd byte (no value to pair with) is ignored.
    """
    out = bytearray()
    for pos in range(0, len(data) - 1, 2):
        count, value = data[pos], data[pos + 1]
        out += bytes([value]) * count
    return bytes(out)
def xor_cipher(data: bytes, key: bytes) -> bytes:
    """XOR *data* against *key*, repeating the key as needed.

    An empty key leaves the data unchanged (returned as a copy).
    """
    if not key:
        return bytes(data)
    key_len = len(key)
    return bytes(byte ^ key[idx % key_len] for idx, byte in enumerate(data))
def caesar_cipher(data: bytes, shift: int) -> bytes:
    """Add *shift* to every byte, wrapping modulo 256 (negative shifts OK)."""
    return bytes((byte + shift) % 256 for byte in data)
def caesar_decipher(data: bytes, shift: int) -> bytes:
    """Undo caesar_cipher by applying the opposite shift."""
    inverse_shift = -shift
    return caesar_cipher(data, inverse_shift)
def bit_reverse_bytes(data: bytes) -> bytes:
    """Reverse the bit order within each byte (MSB becomes LSB)."""
    out = bytearray()
    for byte in data:
        flipped = int(f"{byte:08b}"[::-1], 2)
        out.append(flipped)
    return bytes(out)
def nibble_swap(data: bytes) -> bytes:
    """Exchange the high and low 4-bit halves of every byte."""
    swapped = [((byte << 4) | (byte >> 4)) & 0xFF for byte in data]
    return bytes(swapped)
def invert_bytes(data: bytes) -> bytes:
    """Flip every bit of every byte (bitwise NOT)."""
    return bytes(byte ^ 0xFF for byte in data)
def calculate_crc8(data: bytes, polynomial: int = 0x07) -> int:
    """Calculate CRC-8 checksum.

    MSB-first (non-reflected) bitwise CRC with initial value 0 and no
    final XOR.  Result is always in [0, 255].
    """
    crc = 0
    for byte in data:
        crc ^= byte
        for _ in range(8):
            if crc & 0x80:  # top bit set: shift out and fold in the polynomial
                crc = ((crc << 1) ^ polynomial) & 0xFF
            else:
                crc = (crc << 1) & 0xFF
    return crc
def calculate_crc16(data: bytes, polynomial: int = 0x8005) -> int:
    """Calculate CRC-16 checksum.

    LSB-first (right-shifting) bitwise loop, initial value 0xFFFF, no
    final XOR.  Result is in [0, 65535].
    NOTE(review): a right-shifting loop is normally paired with the
    reflected polynomial (0xA001 for CRC-16/ARC), not 0x8005.  The
    output is deterministic but may not match any standard CRC-16 --
    confirm the intended parameters.
    """
    crc = 0xFFFF
    for byte in data:
        crc ^= byte
        for _ in range(8):
            if crc & 1:  # low bit set: shift out and fold in the polynomial
                crc = (crc >> 1) ^ polynomial
            else:
                crc >>= 1
    return crc
def hamming_encode_nibble(nibble: int) -> int:
    """Encode a nibble with Hamming(7,4) code.

    Bit layout of the 7-bit result, LSB first: p1 p2 d0 p3 d1 d2 d3,
    where p1/p2/p3 are parity bits over the data-bit combinations below.
    """
    d = [(nibble >> i) & 1 for i in range(4)]  # data bits d0..d3
    p1 = d[0] ^ d[1] ^ d[3]
    p2 = d[0] ^ d[2] ^ d[3]
    p3 = d[1] ^ d[2] ^ d[3]
    return p1 | (p2 << 1) | (d[0] << 2) | (p3 << 3) | (d[1] << 4) | (d[2] << 5) | (d[3] << 6)
def hamming_decode_byte(encoded: int) -> tuple[int, int]:
    """Decode Hamming(7,4) code, return (data, errors).

    Recomputes the three parity checks; their combined value (the
    syndrome) gives the 1-based position of a single flipped bit, or 0
    if no error is detected.  A detected single-bit error is corrected
    in place, and the second tuple element is 1 when a correction was
    made, else 0.
    """
    p1 = ((encoded >> 0) ^ (encoded >> 2) ^ (encoded >> 4) ^ (encoded >> 6)) & 1
    p2 = ((encoded >> 1) ^ (encoded >> 2) ^ (encoded >> 5) ^ (encoded >> 6)) & 1
    p3 = ((encoded >> 3) ^ (encoded >> 4) ^ (encoded >> 5) ^ (encoded >> 6)) & 1
    error_pos = p1 | (p2 << 1) | (p3 << 2)
    if error_pos:
        encoded ^= 1 << (error_pos - 1)  # flip the offending bit back
    # Extract data bits d0..d3 from bit positions 2, 4, 5, 6.
    data = (
        ((encoded >> 2) & 1)
        | (((encoded >> 4) & 1) << 1)
        | (((encoded >> 5) & 1) << 2)
        | (((encoded >> 6) & 1) << 3)
    )
    return (data, 1 if error_pos else 0)
def main() -> int:
    """CLI entry point: parse arguments and dispatch to the codec helpers.

    Always returns 0; malformed hex/base64 input propagates as an
    uncaught exception from the underlying decoders.
    """
    parser = argparse.ArgumentParser(description="Binary codec CLI")
    subparsers = parser.add_subparsers(dest="command", help="Commands")
    # hex: UTF-8 text <-> lowercase hex
    hex_p = subparsers.add_parser("hex", help="Hex encode/decode")
    hex_p.add_argument("action", choices=["encode", "decode"])
    hex_p.add_argument("data")
    # base64: UTF-8 text <-> base64 (optionally URL-safe alphabet)
    b64_p = subparsers.add_parser("base64", help="Base64 encode/decode")
    b64_p.add_argument("action", choices=["encode", "decode"])
    b64_p.add_argument("data")
    b64_p.add_argument("--url-safe", action="store_true")
    # rle: run-length encode/decode, hex in / hex out
    rle_p = subparsers.add_parser("rle", help="Run-length encode/decode")
    rle_p.add_argument("action", choices=["encode", "decode"])
    rle_p.add_argument("hex_data")
    # xor: repeating-key XOR, hex in / hex out
    xor_p = subparsers.add_parser("xor", help="XOR cipher")
    xor_p.add_argument("hex_data")
    xor_p.add_argument("hex_key")
    # crc: CRC-8 (default) or CRC-16 of hex input
    crc_p = subparsers.add_parser("crc", help="Calculate CRC")
    crc_p.add_argument("hex_data")
    crc_p.add_argument("--bits", type=int, choices=[8, 16], default=8)
    args = parser.parse_args()
    if args.command == "hex":
        if args.action == "encode":
            data = args.data.encode("utf-8")
            print(encode_hex(data))
        else:
            # Decoded bytes may not be valid UTF-8; replace bad sequences.
            data = decode_hex(args.data)
            print(data.decode("utf-8", errors="replace"))
    elif args.command == "base64":
        if args.action == "encode":
            data = args.data.encode("utf-8")
            if args.url_safe:
                print(encode_base64_url(data))
            else:
                print(encode_base64(data))
        else:
            if args.url_safe:
                data = decode_base64_url(args.data)
            else:
                data = decode_base64(args.data)
            print(data.decode("utf-8"))
    elif args.command == "rle":
        if args.action == "encode":
            data = bytes.fromhex(args.hex_data)
            result = run_length_encode(data)
            print(result.hex())
        else:
            data = bytes.fromhex(args.hex_data)
            result = run_length_decode(data)
            print(result.hex())
    elif args.command == "xor":
        data = bytes.fromhex(args.hex_data)
        key = bytes.fromhex(args.hex_key)
        result = xor_cipher(data, key)
        print(result.hex())
    elif args.command == "crc":
        data = bytes.fromhex(args.hex_data)
        if args.bits == 8:
            print(f"{calculate_crc8(data):02x}")
        else:
            print(f"{calculate_crc16(data):04x}")
    else:
        # No subcommand supplied: show usage instead of failing.
        parser.print_help()
    return 0
# Run the CLI and propagate its return value as the process exit status.
if __name__ == "__main__":
    sys.exit(main())
| false
|
binary_codec
| 357
| 0
|
[
"context_manager",
"exception_handling"
] | 0.652
|
Type inference hints:
Hint: str for variable 'bin_str' [Medium] (usage patterns suggest this type)
Type inference hints:
Hint: str for variable 'b' [Medium] (usage patterns suggest this type)
Hint: list[Any] for variable 'result' [High] (usage patterns suggest this type)
Type inference hints:
Hint: str for variable 'hex_chars' [Medium] (usage patterns suggest this type)
Hint: list[Any] for variable 'text' [High] (usage patterns suggest this type)
Hint: list[Any] for variable 'result' [High] (u
|
|
example_binary_codec
|
test_binary_codec_cli.py
|
"""Tests for binary_codec_cli.py"""
from binary_codec_cli import (
binary_string_to_bytes,
bit_reverse_bytes,
bytes_to_binary_string,
bytes_to_int_list,
caesar_cipher,
caesar_decipher,
calculate_crc8,
calculate_crc16,
decode_ascii_hex,
decode_base16,
decode_base32,
decode_base64,
decode_base64_url,
decode_hex,
decode_utf8_hex,
encode_ascii_hex,
encode_base16,
encode_base32,
encode_base64,
encode_base64_url,
encode_hex,
encode_hex_upper,
encode_utf8_hex,
escape_bytes,
hamming_decode_byte,
hamming_encode_nibble,
int_list_to_bytes,
invert_bytes,
nibble_swap,
run_length_decode,
run_length_encode,
unescape_bytes,
xor_cipher,
)
class TestHexEncoding:
    """Hex encode/decode helpers: exact vectors and roundtrip."""

    def test_encode(self):
        assert encode_hex(b"\x00\xff\xab") == "00ffab"
    def test_decode(self):
        assert decode_hex("00ffab") == b"\x00\xff\xab"
    def test_encode_upper(self):
        assert encode_hex_upper(b"\xab\xcd") == "ABCD"
    def test_roundtrip(self):
        data = b"Hello, World!"
        assert decode_hex(encode_hex(data)) == data
class TestBase64Encoding:
    """Base64 codec: known vectors plus URL-safe alphabet properties."""

    def test_encode(self):
        assert encode_base64(b"Hello") == "SGVsbG8="
    def test_decode(self):
        assert decode_base64("SGVsbG8=") == b"Hello"
    def test_url_safe_encode(self):
        data = b"\xff\xfe\xfd"
        result = encode_base64_url(data)
        assert "+" not in result
        assert "/" not in result
    def test_url_safe_roundtrip(self):
        data = b"\xff\xfe\xfd"
        assert decode_base64_url(encode_base64_url(data)) == data
class TestBase32Encoding:
    """Base32 codec: known vector and roundtrip."""

    def test_encode(self):
        assert encode_base32(b"Hi") == "JBUQ===="
    def test_decode(self):
        assert decode_base32("JBUQ====") == b"Hi"
    def test_roundtrip(self):
        data = b"Test data"
        assert decode_base32(encode_base32(data)) == data
class TestBase16Encoding:
    """Base16 codec: uppercase hex vectors."""

    def test_encode(self):
        assert encode_base16(b"\xab\xcd") == "ABCD"
    def test_decode(self):
        assert decode_base16("ABCD") == b"\xab\xcd"
class TestBinaryString:
    """Bytes <-> space-separated 8-bit binary string conversions."""

    def test_to_binary(self):
        assert bytes_to_binary_string(b"\x00\xff") == "00000000 11111111"
    def test_from_binary(self):
        assert binary_string_to_bytes("00000000 11111111") == b"\x00\xff"
    def test_roundtrip(self):
        data = b"\x12\x34"
        assert binary_string_to_bytes(bytes_to_binary_string(data)) == data
class TestIntList:
    """Bytes <-> list-of-int conversions."""

    def test_to_list(self):
        assert bytes_to_int_list(b"\x01\x02\x03") == [1, 2, 3]
    def test_from_list(self):
        assert int_list_to_bytes([1, 2, 3]) == b"\x01\x02\x03"
class TestAsciiUtf8Hex:
    """Text <-> hex helpers for ASCII and UTF-8 encodings."""

    def test_ascii_encode(self):
        assert encode_ascii_hex("ABC") == "414243"
    def test_ascii_decode(self):
        assert decode_ascii_hex("414243") == "ABC"
    def test_utf8_encode(self):
        result = encode_utf8_hex("世")
        assert len(result) == 6  # 3 bytes * 2 hex chars
    def test_utf8_decode(self):
        encoded = encode_utf8_hex("世界")
        assert decode_utf8_hex(encoded) == "世界"
class TestEscape:
    """\\xNN escaping of non-printable bytes and its inverse."""

    def test_escape_printable(self):
        assert escape_bytes(b"ABC") == "ABC"
    def test_escape_non_printable(self):
        assert escape_bytes(b"\x00\x01\x02") == "\\x00\\x01\\x02"
    def test_escape_mixed(self):
        assert escape_bytes(b"A\x00B") == "A\\x00B"
    def test_unescape(self):
        assert unescape_bytes("A\\x00B") == b"A\x00B"
    def test_roundtrip(self):
        data = b"Hello\x00\x01World"
        assert unescape_bytes(escape_bytes(data)) == data
class TestRunLengthEncoding:
    """(count, value) run-length codec, including the empty input."""

    def test_encode_basic(self):
        data = b"\x00\x00\x00\x01\x01"
        encoded = run_length_encode(data)
        assert encoded == b"\x03\x00\x02\x01"
    def test_decode_basic(self):
        encoded = b"\x03\x00\x02\x01"
        assert run_length_decode(encoded) == b"\x00\x00\x00\x01\x01"
    def test_roundtrip(self):
        data = b"\xaa\xaa\xaa\xaa\xbb\xbb\xcc"
        assert run_length_decode(run_length_encode(data)) == data
    def test_empty(self):
        assert run_length_encode(b"") == b""
        assert run_length_decode(b"") == b""
class TestXorCipher:
    """Repeating-key XOR: vectors, self-inverse roundtrip, empty key."""

    def test_basic(self):
        data = b"\x00\xff"
        key = b"\xff"
        result = xor_cipher(data, key)
        assert result == b"\xff\x00"
    def test_repeating_key(self):
        data = b"\x00\x01\x02\x03"
        key = b"\xff\x00"
        result = xor_cipher(data, key)
        assert result == b"\xff\x01\xfd\x03"
    def test_roundtrip(self):
        data = b"Secret message"
        key = b"key"
        encrypted = xor_cipher(data, key)
        decrypted = xor_cipher(encrypted, key)
        assert decrypted == data
    def test_empty_key(self):
        data = b"\x01\x02\x03"
        assert xor_cipher(data, b"") == data
class TestCaesarCipher:
    """Byte-wise Caesar shift: vectors, wrap-around, roundtrips."""

    def test_cipher(self):
        data = b"\x00\x01\x02"
        assert caesar_cipher(data, 1) == b"\x01\x02\x03"
    def test_decipher(self):
        data = b"\x01\x02\x03"
        assert caesar_decipher(data, 1) == b"\x00\x01\x02"
    def test_wrap(self):
        data = b"\xff"
        assert caesar_cipher(data, 1) == b"\x00"
    def test_roundtrip(self):
        data = b"Hello"
        for shift in [1, 5, 100, 255]:
            assert caesar_decipher(caesar_cipher(data, shift), shift) == data
class TestBitOperations:
    """Per-byte bit reversal, nibble swap, and bitwise NOT."""

    def test_bit_reverse(self):
        assert bit_reverse_bytes(b"\x80") == b"\x01"
        assert bit_reverse_bytes(b"\x0f") == b"\xf0"
    def test_nibble_swap(self):
        assert nibble_swap(b"\xab") == b"\xba"
        assert nibble_swap(b"\x12\x34") == b"\x21\x43"
    def test_invert(self):
        assert invert_bytes(b"\x00") == b"\xff"
        assert invert_bytes(b"\xff") == b"\x00"
        assert invert_bytes(b"\xaa") == b"\x55"
class TestCRC:
    """CRC-8/CRC-16: value range, determinism, and the empty input."""

    def test_crc8_empty(self):
        assert calculate_crc8(b"") == 0
    def test_crc8_basic(self):
        result = calculate_crc8(b"123456789")
        assert isinstance(result, int)
        assert 0 <= result <= 255
    def test_crc16_basic(self):
        result = calculate_crc16(b"123456789")
        assert isinstance(result, int)
        assert 0 <= result <= 65535
    def test_crc_deterministic(self):
        data = b"Test data"
        assert calculate_crc8(data) == calculate_crc8(data)
        assert calculate_crc16(data) == calculate_crc16(data)
class TestHammingCode:
    """Hamming(7,4): encode range and decode roundtrip for all 16 nibbles."""

    def test_encode_nibble(self):
        # Hamming(7,4) should produce 7-bit code
        result = hamming_encode_nibble(0b0101)
        assert 0 <= result < 128
    def test_decode_no_error(self):
        encoded = hamming_encode_nibble(0b1010)
        data, errors = hamming_decode_byte(encoded)
        assert data == 0b1010
        assert errors == 0
    def test_roundtrip(self):
        for nibble in range(16):
            encoded = hamming_encode_nibble(nibble)
            decoded, _ = hamming_decode_byte(encoded)
            assert decoded == nibble
class TestEdgeCases:
    """Degenerate inputs: empty, single byte, all-0x00, all-0xff."""

    def test_empty_data(self):
        assert encode_hex(b"") == ""
        assert decode_hex("") == b""
        assert encode_base64(b"") == ""
        assert decode_base64("") == b""
    def test_single_byte(self):
        data = b"\x42"
        assert decode_hex(encode_hex(data)) == data
        assert decode_base64(encode_base64(data)) == data
    def test_all_zeros(self):
        data = b"\x00\x00\x00"
        assert decode_hex(encode_hex(data)) == data
        assert run_length_decode(run_length_encode(data)) == data
    def test_all_ones(self):
        data = b"\xff\xff\xff"
        assert decode_hex(encode_hex(data)) == data
        assert xor_cipher(xor_cipher(data, b"\xaa"), b"\xaa") == data
| false
|
binary_codec
| 284
| 0
|
[
"class_definition"
] | 0.612
|
thread 'main' (2445357) panicked at crates/depyler-core/src/direct_rules.rs:1763:28:
expected identifier, found keyword `_`
note: run with `RUST_BACKTRACE=1` environment variable to display a backtrace
|
|
example_binary_search
|
search_cli.py
|
#!/usr/bin/env python3
"""Binary search CLI.
Binary search implementations with various comparators.
"""
import argparse
import sys
from collections.abc import Callable
def binary_search(arr: list, target, key: Callable | None = None) -> int:
"""Basic binary search. Returns index or -1 if not found."""
left, right = 0, len(arr) - 1
while left <= right:
mid = (left + right) // 2
val = key(arr[mid]) if key else arr[mid]
target_val = key(target) if key else target
if val == target_val:
return mid
elif val < target_val:
left = mid + 1
else:
right = mid - 1
return -1
def binary_search_left(arr: list, target, key: Callable | None = None) -> int:
"""Find leftmost position where target could be inserted."""
left, right = 0, len(arr)
while left < right:
mid = (left + right) // 2
val = key(arr[mid]) if key else arr[mid]
target_val = key(target) if key else target
if val < target_val:
left = mid + 1
else:
right = mid
return left
def binary_search_right(arr: list, target, key: Callable | None = None) -> int:
"""Find rightmost position where target could be inserted."""
left, right = 0, len(arr)
while left < right:
mid = (left + right) // 2
val = key(arr[mid]) if key else arr[mid]
target_val = key(target) if key else target
if val <= target_val:
left = mid + 1
else:
right = mid
return left
def count_occurrences(arr: list, target, key: Callable | None = None) -> int:
    """Count occurrences of target in sorted array.

    The gap between the right and left insertion points is exactly the
    number of elements equal to target.
    """
    left_idx = binary_search_left(arr, target, key)
    right_idx = binary_search_right(arr, target, key)
    return right_idx - left_idx
def find_range(arr: list, target, key: Callable | None = None) -> tuple[int, int]:
    """Find the inclusive index range (first, last) of target in sorted array.

    Returns (-1, -1) when target is absent.  Note the returned pair is
    inclusive on both ends: the right insertion point minus one.
    """
    left_idx = binary_search_left(arr, target, key)
    right_idx = binary_search_right(arr, target, key)
    if left_idx == right_idx:
        return -1, -1
    return left_idx, right_idx - 1
def search_rotated(arr: list, target) -> int:
    """Search in a rotated sorted array; index of target or -1.

    Pivot-aware binary search: at every step at least one half of the
    window is sorted, and the target is bracketed against that half.
    NOTE(review): this assumes distinct elements; with duplicates the
    "left half is sorted" test can misclassify and miss the target.
    """
    if not arr:
        return -1
    left, right = 0, len(arr) - 1
    while left <= right:
        mid = (left + right) // 2
        if arr[mid] == target:
            return mid
        # Left half is sorted
        if arr[left] <= arr[mid]:
            if arr[left] <= target < arr[mid]:
                right = mid - 1
            else:
                left = mid + 1
        # Right half is sorted
        else:
            if arr[mid] < target <= arr[right]:
                left = mid + 1
            else:
                right = mid - 1
    return -1
def find_peak(arr: list) -> int:
    """Index of some peak element (not smaller than its neighbours); -1 if empty."""
    if not arr:
        return -1
    if len(arr) == 1:
        return 0
    lo, hi = 0, len(arr) - 1
    while lo < hi:
        mid = (lo + hi) // 2
        if arr[mid + 1] > arr[mid]:
            lo = mid + 1  # uphill to the right: a peak lies that way
        else:
            hi = mid
    return lo
def search_closest(arr: list, target) -> int:
    """Find index of element closest to target in a sorted array.

    Binary-searches for the leftmost element >= target, then compares
    it with its left neighbour; on a distance tie the left (smaller)
    neighbour wins.
    """
    if not arr:
        return -1
    left, right = 0, len(arr) - 1
    while left < right:
        mid = (left + right) // 2
        if arr[mid] < target:
            left = mid + 1
        else:
            right = mid
    # Check neighbors for closest
    if left == 0:
        return 0
    # Defensive: with right initialised to len(arr)-1 the loop cannot
    # actually leave left == len(arr) for a non-empty array.
    if left == len(arr):
        return len(arr) - 1
    if abs(arr[left] - target) < abs(arr[left - 1] - target):
        return left
    return left - 1
def search_floor(arr: list, target) -> int:
    """Index of the largest element <= target, or -1 if none exists.

    binary_search_right returns the number of elements <= target, so
    the floor sits just before that position.  (The original code had a
    dead `else 0` branch here: idx == 0 already implies arr[0] > target.)
    """
    if not arr:
        return -1
    idx = binary_search_right(arr, target)
    if idx == 0:
        # No element is <= target.
        return -1
    return idx - 1
def search_ceil(arr: list, target) -> int:
    """Find smallest element >= target.

    Returns the index of the ceiling element, or -1 when every element
    is smaller than target (or the array is empty).
    """
    if not arr:
        return -1
    idx = binary_search_left(arr, target)
    if idx == len(arr):
        return -1
    return idx
def main() -> int:
    """Parse CLI arguments and run the selected search over the values.

    Values come from the positional arguments, or from stdin when none
    are given.  Exit status is 0 on a successful find and 1 otherwise.
    """
    parser = argparse.ArgumentParser(description="Binary search operations")
    parser.add_argument("target", type=int, help="Target value to search")
    parser.add_argument("values", nargs="*", type=int, help="Sorted values to search in")
    parser.add_argument(
        "--mode",
        choices=["find", "left", "right", "count", "range", "closest", "floor", "ceil"],
        default="find",
        help="Search mode",
    )
    parser.add_argument("--rotated", action="store_true", help="Search rotated array")
    parser.add_argument("--peak", action="store_true", help="Find peak element")
    args = parser.parse_args()
    if not args.values:
        # No positional values: read whitespace-separated ints from stdin.
        values = [int(x) for x in sys.stdin.read().split()]
    else:
        values = args.values
    if args.peak:
        idx = find_peak(values)
        if idx >= 0:
            print(f"Peak at index {idx}: {values[idx]}")
        else:
            print("No peak found")
        return 0
    if args.rotated:
        idx = search_rotated(values, args.target)
    elif args.mode == "find":
        idx = binary_search(values, args.target)
    elif args.mode == "left":
        # NOTE(review): left/right modes return an insertion point, which
        # can equal len(values); values[idx] below would then raise
        # IndexError -- confirm intended behaviour for out-of-range targets.
        idx = binary_search_left(values, args.target)
    elif args.mode == "right":
        idx = binary_search_right(values, args.target)
    elif args.mode == "count":
        # count and range modes print their own output and always exit 0.
        count = count_occurrences(values, args.target)
        print(f"Count: {count}")
        return 0
    elif args.mode == "range":
        start, end = find_range(values, args.target)
        if start == -1:
            print("Not found")
        else:
            print(f"Range: [{start}, {end}]")
        return 0
    elif args.mode == "closest":
        idx = search_closest(values, args.target)
    elif args.mode == "floor":
        idx = search_floor(values, args.target)
    elif args.mode == "ceil":
        idx = search_ceil(values, args.target)
    else:
        idx = -1
    if idx >= 0:
        print(f"Index: {idx}, Value: {values[idx]}")
    else:
        print("Not found")
    return 0 if idx >= 0 else 1
# Run the CLI; process exit status mirrors search success (0 found, 1 not).
if __name__ == "__main__":
    sys.exit(main())
|
📄 Source: /home/noah/src/reprorusted-python-cli/examples/example_binary_search/search_cli.py (6365 bytes)
📝 Output: /home/noah/src/reprorusted-python-cli/examples/example_binary_search/search_cli.rs (16559 bytes)
📦 Cargo.toml: /home/noah/src/reprorusted-python-cli/examples/example_binary_search/Cargo.toml (3 dependencies)
⏱️ Parse time: 107ms
📊 Throughput: 57.9 KB/s
⏱️ Total time: 107ms
| true
|
binary_search
| 249
| 6
|
[
"context_manager",
"stdin_usage"
] | 0.652
| null |
End of preview. Expand
in Data Studio
Depyler CITL Corpus
Python→Rust transpilation pairs for Compiler-in-the-Loop training.
Dataset Description
606 Python CLI examples with corresponding Rust translations (where available), designed for training transpiler ML models.
| Split | Examples | With Rust | Size |
|---|---|---|---|
| train | 606 | 439 (72.4%) | 957 KB |
Schema
- example_name: str # Directory name (e.g., "example_fibonacci")
- python_file: str # Python filename
- python_code: str # Full Python source
- rust_code: str # Corresponding Rust (empty if not transpiled)
- has_rust: bool # Whether Rust translation exists
- category: str # Extracted category
- python_lines: int # Line count
- rust_lines: int # Line count
- blocking_features: [str] # Detected Python features (v2)
- suspiciousness: float # Tarantula score 0-1 (v2)
- error: str # Transpilation error if failed (v2)
Tarantula Fault Localization
The corpus_insights.json file contains fault localization analysis using the Tarantula algorithm from entrenar CITL.
Priority Features (by suspiciousness score)
| Feature | Score | Categories Affected | Priority |
|---|---|---|---|
| async_await | 0.946 | 4 | P0 |
| generator | 0.927 | 14 | P0 |
| walrus_operator | 0.850 | 1 | P1 |
| lambda | 0.783 | 29 | P1 |
| context_manager | 0.652 | 93 | P2 |
Higher suspiciousness = more correlated with transpilation failures.
Insights File Structure
{
"summary": { "total_pairs": 606, "success_rate": 71.9 },
"tarantula_fault_localization": { "scores": {...} },
"priority_features_to_implement": [...],
"zero_success_categories": [...],
"category_insights": {...}
}
Regenerate with: python3 scripts/generate_insights.py
Usage
from datasets import load_dataset
ds = load_dataset("paiml/depyler-citl")
# Filter to pairs with Rust translations
pairs = ds["train"].filter(lambda x: x["has_rust"])
for row in pairs:
print(f"Python: {row['python_lines']} lines → Rust: {row['rust_lines']} lines")
Related Projects
- depyler - Python→Rust transpiler
- alimentar - Dataset loading library
- entrenar - ML training with CITL
License
MIT
- Downloads last month
- 16